author    WuKong <rebirthmonkey@gmail.com>  2015-06-30 18:47:29 +0200
committer WuKong <rebirthmonkey@gmail.com>  2015-06-30 18:47:29 +0200
commit    b8c756ecdd7cced1db4300935484e8c83701c82e (patch)
tree      87e51107d82b217ede145de9d9d59e2100725bd7 /keystone-moon/keystone/common
parent    c304c773bae68fb854ed9eab8fb35c4ef17cf136 (diff)

migrate moon code from github to opnfv

Change-Id: Ice53e368fd1114d56a75271aa9f2e598e3eba604
Signed-off-by: WuKong <rebirthmonkey@gmail.com>
Diffstat (limited to 'keystone-moon/keystone/common')
-rw-r--r--  keystone-moon/keystone/common/__init__.py  0
-rw-r--r--  keystone-moon/keystone/common/authorization.py  87
-rw-r--r--  keystone-moon/keystone/common/base64utils.py  396
-rw-r--r--  keystone-moon/keystone/common/cache/__init__.py  15
-rw-r--r--  keystone-moon/keystone/common/cache/_memcache_pool.py  233
-rw-r--r--  keystone-moon/keystone/common/cache/backends/__init__.py  0
-rw-r--r--  keystone-moon/keystone/common/cache/backends/memcache_pool.py  61
-rw-r--r--  keystone-moon/keystone/common/cache/backends/mongo.py  557
-rw-r--r--  keystone-moon/keystone/common/cache/backends/noop.py  49
-rw-r--r--  keystone-moon/keystone/common/cache/core.py  308
-rw-r--r--  keystone-moon/keystone/common/config.py  1118
-rw-r--r--  keystone-moon/keystone/common/controller.py  800
-rw-r--r--  keystone-moon/keystone/common/dependency.py  311
-rw-r--r--  keystone-moon/keystone/common/driver_hints.py  65
-rw-r--r--  keystone-moon/keystone/common/environment/__init__.py  100
-rw-r--r--  keystone-moon/keystone/common/environment/eventlet_server.py  194
-rw-r--r--  keystone-moon/keystone/common/extension.py  45
-rw-r--r--  keystone-moon/keystone/common/json_home.py  76
-rw-r--r--  keystone-moon/keystone/common/kvs/__init__.py  33
-rw-r--r--  keystone-moon/keystone/common/kvs/backends/__init__.py  0
-rw-r--r--  keystone-moon/keystone/common/kvs/backends/inmemdb.py  69
-rw-r--r--  keystone-moon/keystone/common/kvs/backends/memcached.py  188
-rw-r--r--  keystone-moon/keystone/common/kvs/core.py  423
-rw-r--r--  keystone-moon/keystone/common/kvs/legacy.py  60
-rw-r--r--  keystone-moon/keystone/common/ldap/__init__.py  15
-rw-r--r--  keystone-moon/keystone/common/ldap/core.py  1910
-rw-r--r--  keystone-moon/keystone/common/manager.py  76
-rw-r--r--  keystone-moon/keystone/common/models.py  182
-rw-r--r--  keystone-moon/keystone/common/openssl.py  347
-rwxr-xr-x  keystone-moon/keystone/common/pemutils.py  509
-rw-r--r--  keystone-moon/keystone/common/router.py  80
-rw-r--r--  keystone-moon/keystone/common/sql/__init__.py  15
-rw-r--r--  keystone-moon/keystone/common/sql/core.py  431
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/README  4
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/__init__.py  17
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/manage.py  5
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/migrate.cfg  25
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/044_icehouse.py  279
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py  25
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py  25
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py  25
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/048_placeholder.py  25
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py  25
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py  49
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py  49
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/052_add_auth_url_to_region.py  34
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py  156
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py  35
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py  35
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/056_placeholder.py  22
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/057_placeholder.py  22
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/058_placeholder.py  22
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/059_placeholder.py  22
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py  22
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py  54
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py  41
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/063_drop_region_auth_url.py  32
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py  45
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py  55
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py  43
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/__init__.py  0
-rw-r--r--  keystone-moon/keystone/common/sql/migration_helpers.py  258
-rw-r--r--  keystone-moon/keystone/common/utils.py  471
-rw-r--r--  keystone-moon/keystone/common/validation/__init__.py  62
-rw-r--r--  keystone-moon/keystone/common/validation/parameter_types.py  57
-rw-r--r--  keystone-moon/keystone/common/validation/validators.py  59
-rw-r--r--  keystone-moon/keystone/common/wsgi.py  830
67 files changed, 11683 insertions(+), 0 deletions(-)
diff --git a/keystone-moon/keystone/common/__init__.py b/keystone-moon/keystone/common/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/common/__init__.py
diff --git a/keystone-moon/keystone/common/authorization.py b/keystone-moon/keystone/common/authorization.py
new file mode 100644
index 00000000..5cb1e630
--- /dev/null
+++ b/keystone-moon/keystone/common/authorization.py
@@ -0,0 +1,87 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 - 2012 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log
+
+from keystone import exception
+from keystone.i18n import _, _LW
+from keystone.models import token_model
+
+
+AUTH_CONTEXT_ENV = 'KEYSTONE_AUTH_CONTEXT'
+"""Environment variable used to convey the Keystone auth context.
+
+Auth context is essentially the user credential used for policy enforcement.
+It is a dictionary with the following attributes:
+
+* ``user_id``: user ID of the principal
+* ``project_id`` (optional): project ID of the scoped project if auth is
+ project-scoped
+* ``domain_id`` (optional): domain ID of the scoped domain if auth is
+ domain-scoped
+* ``roles`` (optional): list of role names for the given scope
+* ``group_ids``: list of group IDs for which the API user has membership
+
+"""
+
+LOG = log.getLogger(__name__)
+
+
+def token_to_auth_context(token):
+ if not isinstance(token, token_model.KeystoneToken):
+ raise exception.UnexpectedError(_('token reference must be a '
+ 'KeystoneToken type, got: %s') %
+ type(token))
+ auth_context = {'token': token,
+ 'is_delegated_auth': False}
+ try:
+ auth_context['user_id'] = token.user_id
+ except KeyError:
+ LOG.warning(_LW('RBAC: Invalid user data in token'))
+ raise exception.Unauthorized()
+
+ if token.project_scoped:
+ auth_context['project_id'] = token.project_id
+ elif token.domain_scoped:
+ auth_context['domain_id'] = token.domain_id
+ else:
+ LOG.debug('RBAC: Proceeding without project or domain scope')
+
+ if token.trust_scoped:
+ auth_context['is_delegated_auth'] = True
+ auth_context['trust_id'] = token.trust_id
+ auth_context['trustor_id'] = token.trustor_user_id
+ auth_context['trustee_id'] = token.trustee_user_id
+ else:
+ auth_context['trust_id'] = None
+ auth_context['trustor_id'] = None
+ auth_context['trustee_id'] = None
+
+ roles = token.role_names
+ if roles:
+ auth_context['roles'] = roles
+
+ if token.oauth_scoped:
+ auth_context['is_delegated_auth'] = True
+ auth_context['consumer_id'] = token.oauth_consumer_id
+ auth_context['access_token_id'] = token.oauth_access_token_id
+
+ if token.is_federated_user:
+ auth_context['group_ids'] = token.federation_group_ids
+
+ return auth_context
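
An illustrative aside (not part of the patch): for a project-scoped, non-delegated token carrying the ``admin`` role, token_to_auth_context() above returns a dictionary shaped like the one below; the IDs are made-up placeholders. Middleware can then expose this dictionary to later pipeline stages under the AUTH_CONTEXT_ENV key described in the module docstring.

    auth_context = {
        'token': token,              # the original KeystoneToken object
        'is_delegated_auth': False,  # no trust and no OAuth delegation
        'user_id': 'u123',
        'project_id': 'p456',
        'trust_id': None,
        'trustor_id': None,
        'trustee_id': None,
        'roles': ['admin'],
    }
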
diff --git a/keystone-moon/keystone/common/base64utils.py b/keystone-moon/keystone/common/base64utils.py
new file mode 100644
index 00000000..1a636f9b
--- /dev/null
+++ b/keystone-moon/keystone/common/base64utils.py
@@ -0,0 +1,396 @@
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+
+Python provides the base64 module as a core module but this is mostly
+limited to encoding and decoding base64 and its variants. It is often
+useful to be able to perform other operations on base64 text. This
+module is meant to be used in conjunction with the core base64 module.
+
+Standardized base64 is defined in
+RFC-4648 "The Base16, Base32, and Base64 Data Encodings".
+
+This module provides the following base64 utility functionality:
+
+ * tests if text is valid base64
+ * filter formatting from base64
+ * convert base64 between different alphabets
+ * Handle padding issues
+ - test if base64 is padded
+ - removes padding
+ - restores padding
+ * wraps base64 text into formatted blocks
+ - via iterator
+ - return formatted string
+
+"""
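
# Editor's note -- an illustrative sketch, not part of the original commit.
# Assuming this module is importable as keystone.common.base64utils, the
# helpers defined below combine roughly like this:
#
#     >>> from keystone.common import base64utils
#     >>> base64utils.is_valid_base64('YWJjZA==')
#     True
#     >>> base64utils.is_valid_base64('not*base64*text')
#     False
#     >>> base64utils.base64_to_base64url('////')
#     '____'
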
+
+import re
+import string
+
+import six
+from six.moves import urllib
+
+from keystone.i18n import _
+
+
+class InvalidBase64Error(ValueError):
+ pass
+
+base64_alphabet_re = re.compile(r'^[A-Za-z0-9+/=]+$')
+base64url_alphabet_re = re.compile(r'^[A-Za-z0-9---_=]+$')
+
+base64_non_alphabet_re = re.compile(r'[^A-Za-z0-9+/=]+')
+base64url_non_alphabet_re = re.compile(r'[^A-Za-z0-9---_=]+')
+
+_strip_formatting_re = re.compile(r'\s+')
+
+_base64_to_base64url_trans = string.maketrans('+/', '-_')
+_base64url_to_base64_trans = string.maketrans('-_', '+/')
+
+
+def _check_padding_length(pad):
+ if len(pad) != 1:
+ raise ValueError(_('pad must be single character'))
+
+
+def is_valid_base64(text):
+ """Test if input text can be base64 decoded.
+
+ :param text: input base64 text
+ :type text: string
+ :returns: bool -- True if text can be decoded as base64, False otherwise
+ """
+
+ text = filter_formatting(text)
+
+ if base64_non_alphabet_re.search(text):
+ return False
+
+ try:
+ return base64_is_padded(text)
+ except InvalidBase64Error:
+ return False
+
+
+def is_valid_base64url(text):
+ """Test if input text can be base64url decoded.
+
+ :param text: input base64 text
+ :type text: string
+ :returns: bool -- True if text can be decoded as base64url,
+ False otherwise
+ """
+
+ text = filter_formatting(text)
+
+ if base64url_non_alphabet_re.search(text):
+ return False
+
+ try:
+ return base64_is_padded(text)
+ except InvalidBase64Error:
+ return False
+
+
+def filter_formatting(text):
+ """Return base64 text without any formatting, just the base64.
+
+ Base64 text is often formatted with whitespace, line endings,
+ etc. This function strips out any formatting, the result will
+ contain only base64 characters.
+
+ Note, this function does not filter out all non-base64 alphabet
+ characters, it only removes characters used for formatting.
+
+ :param text: input text to filter
+ :type text: string
+ :returns: string -- filtered text without formatting
+ """
+ return _strip_formatting_re.sub('', text)
+
+
+def base64_to_base64url(text):
+ """Convert base64 text to base64url text.
+
+ base64url text is designed to be safe for use in file names and
+ URLs. It is defined in RFC-4648 Section 5.
+
+ base64url differs from base64 in the last two alphabet characters
+ at index 62 and 63; these are sometimes referred to as the
+ altchars. The '+' character at index 62 is replaced by '-'
+ (hyphen) and the '/' character at index 63 is replaced by '_'
+ (underscore).
+
+ This function only translates the altchars, non-alphabet
+ characters are not filtered out.
+
+ WARNING::
+
+ base64url continues to use the '=' pad character which is NOT URL
+ safe. RFC-4648 suggests two alternate methods to deal with this:
+
+ percent-encode
+ percent-encode the pad character (e.g. '=' becomes
+ '%3D'). This makes the base64url text fully safe. But
+ percent-encoding has the downside of requiring
+ percent-decoding prior to feeding the base64url text into a
+ base64url decoder since most base64url decoders do not
+ recognize %3D as a pad character and most decoders require
+ correct padding.
+
+ no-padding
+ padding is not strictly necessary to decode base64 or
+ base64url text, the pad can be computed from the input text
+ length. However many decoders demand padding and will consider
+ non-padded text to be malformed. If one wants to omit the
+ trailing pad character(s) for use in URLs, they can be added back
+ using the base64_assure_padding() function.
+
+ This function makes no decisions about which padding methodology to
+ use. One can either call base64_strip_padding() to remove any pad
+ characters (restoring later with base64_assure_padding()) or call
+ base64url_percent_encode() to percent-encode the pad characters.
+
+ :param text: input base64 text
+ :type text: string
+ :returns: string -- base64url text
+ """
+ return text.translate(_base64_to_base64url_trans)
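
# Editor's note -- illustrative examples, not part of the original commit,
# showing the altchar translation and the two padding strategies described in
# the docstring above ('//8=' is the standard base64 encoding of b'\xff\xff'):
#
#     >>> base64_to_base64url('//8=')
#     '__8='
#     >>> base64url_percent_encode('__8=')   # keep the pad, percent-encode it
#     '__8%3D'
#     >>> base64_strip_padding('__8=')       # or drop the pad entirely
#     '__8'
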
+
+
+def base64url_to_base64(text):
+ """Convert base64url text to base64 text.
+
+ See base64_to_base64url() for a description of base64url text and
+ its issues.
+
+ This function does NOT handle percent-encoded pad characters; they
+ will be left intact. If the input base64url text is
+ percent-encoded you should first call base64url_percent_decode().
+
+ :param text: text in base64url alphabet
+ :type text: string
+ :returns: string -- text in base64 alphabet
+
+ """
+ return text.translate(_base64url_to_base64_trans)
+
+
+def base64_is_padded(text, pad='='):
+ """Test if the text is base64 padded.
+
+ The input text must be in a base64 alphabet. The pad must be a
+ single character. If the text has been percent-encoded (e.g. pad
+ is the string '%3D') you must convert the text back to a base64
+ alphabet (e.g. if percent-encoded use the function
+ base64url_percent_decode()).
+
+ :param text: text containing ONLY characters in a base64 alphabet
+ :type text: string
+ :param pad: pad character (must be single character) (default: '=')
+ :type pad: string
+ :returns: bool -- True if padded, False otherwise
+ :raises: ValueError, InvalidBase64Error
+ """
+
+ _check_padding_length(pad)
+
+ text_len = len(text)
+ if text_len > 0 and text_len % 4 == 0:
+ pad_index = text.find(pad)
+ if pad_index >= 0 and pad_index < text_len - 2:
+ raise InvalidBase64Error(_('text is multiple of 4, '
+ 'but pad "%s" occurs before '
+ '2nd to last char') % pad)
+ if pad_index == text_len - 2 and text[-1] != pad:
+ raise InvalidBase64Error(_('text is multiple of 4, '
+ 'but pad "%s" occurs before '
+ 'non-pad last char') % pad)
+ return True
+
+ if text.find(pad) >= 0:
+ raise InvalidBase64Error(_('text is not a multiple of 4, '
+ 'but contains pad "%s"') % pad)
+ return False
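
# Editor's note -- illustrative examples, not part of the original commit:
#
#     >>> base64_is_padded('YWJjZA==')
#     True
#     >>> base64_is_padded('YWJjZA')   # length is not a multiple of 4
#     False
#
# base64_is_padded('YW=jZA==') raises InvalidBase64Error, because the pad
# character appears before the second-to-last position.
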
+
+
+def base64url_percent_encode(text):
+ """Percent-encode base64url padding.
+
+ The input text should only contain base64url alphabet
+ characters. Any non-base64url alphabet characters will also be
+ subject to percent-encoding.
+
+ :param text: text containing ONLY characters in the base64url alphabet
+ :type text: string
+ :returns: string -- percent-encoded base64url text
+ :raises: InvalidBase64Error
+ """
+
+ if len(text) % 4 != 0:
+ raise InvalidBase64Error(_('padded base64url text must be '
+ 'multiple of 4 characters'))
+
+ return urllib.parse.quote(text)
+
+
+def base64url_percent_decode(text):
+ """Percent-decode base64url padding.
+
+ The input text should only contain base64url alphabet
+ characters and the percent-encoded pad character. Any other
+ percent-encoded characters will be subject to percent-decoding.
+
+ :param text: base64url alphabet text
+ :type text: string
+ :returns: string -- percent-decoded base64url text
+ """
+
+ decoded_text = urllib.parse.unquote(text)
+
+ if len(decoded_text) % 4 != 0:
+ raise InvalidBase64Error(_('padded base64url text must be '
+ 'multiple of 4 characters'))
+
+ return decoded_text
+
+
+def base64_strip_padding(text, pad='='):
+ """Remove padding from input base64 text.
+
+ :param text: text containing ONLY characters in a base64 alphabet
+ :type text: string
+ :param pad: pad character (must be single character) (default: '=')
+ :type pad: string
+ :returns: string -- base64 text without padding
+ :raises: ValueError
+ """
+ _check_padding_length(pad)
+
+ # Can't be padded if text is less than 4 characters.
+ if len(text) < 4:
+ return text
+
+ if text[-1] == pad:
+ if text[-2] == pad:
+ return text[0:-2]
+ else:
+ return text[0:-1]
+ else:
+ return text
+
+
+def base64_assure_padding(text, pad='='):
+ """Assure the input text ends with padding.
+
+ Base64 text is normally expected to be a multiple of 4
+ characters. Each 4 character base64 sequence produces 3 octets of
+ binary data. If the binary data is not a multiple of 3 the base64
+ text is padded at the end with a pad character such that it is
+ always a multiple of 4. Padding is ignored and does not alter the
+ binary data nor its length.
+
+ In some circumstances it is desirable to omit the padding
+ character due to transport encoding conflicts. Base64 text can
+ still be correctly decoded if the length of the base64 text
+ (consisting only of characters in the desired base64 alphabet) is
+ known; padding is not absolutely necessary.
+
+ Some base64 decoders demand correct padding, or one may wish to
+ emit RFC-compliant base64; this function performs that action.
+
+ Input is assumed to consist only of members of a base64
+ alphabet (i.e. no whitespace).
+
+ Use the filter_formatting() function to assure the input text
+ contains only the members of the alphabet.
+
+ If the text ends with the pad it is assumed to already be
+ padded. Otherwise the binary length is computed from the input
+ text length and correct number of pad characters are appended.
+
+ :param text: text containing ONLY characters in a base64 alphabet
+ :type text: string
+ :param pad: pad character (must be single character) (default: '=')
+ :type pad: string
+ :returns: string -- input base64 text with padding
+ :raises: ValueError
+ """
+ _check_padding_length(pad)
+
+ if text.endswith(pad):
+ return text
+
+ n = len(text) % 4
+ if n == 0:
+ return text
+
+ n = 4 - n
+ padding = pad * n
+ return text + padding
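
# Editor's note -- an illustrative round trip, not part of the original commit:
#
#     >>> base64_strip_padding('YWJjZA==')
#     'YWJjZA'
#     >>> base64_assure_padding('YWJjZA')
#     'YWJjZA=='
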
+
+
+def base64_wrap_iter(text, width=64):
+ """Fold text into lines of text with max line length.
+
+ Input is assumed to consist only of members of a base64
+ alphabet (i.e. no whitespace). Iteration yields a sequence of lines.
+ The line does NOT terminate with a line ending.
+
+ Use the filter_formatting() function to assure the input text
+ contains only the members of the alphabet.
+
+ :param text: text containing ONLY characters in a base64 alphabet
+ :type text: string
+ :param width: number of characters in each wrapped line (default: 64)
+ :type width: int
+ :returns: generator -- sequence of lines of base64 text.
+ """
+
+ text = six.text_type(text)
+ for x in six.moves.range(0, len(text), width):
+ yield text[x:x + width]
+
+
+def base64_wrap(text, width=64):
+ """Fold text into lines of text with max line length.
+
+ Input is assumed to consist only of members of a base64
+ alphabet (i.e. no whitespace). Fold the text into lines whose
+ line length is width chars long, terminate each line with line
+ ending (default is '\\n'). Return the wrapped text as a single
+ string.
+
+ Use the filter_formatting() function to assure the input text
+ contains only the members of the alphabet.
+
+ :param text: text containing ONLY characters in a base64 alphabet
+ :type text: string
+ :param width: number of characters in each wrapped line (default: 64)
+ :type width: int
+ :returns: string -- wrapped text.
+ """
+
+ buf = six.StringIO()
+
+ for line in base64_wrap_iter(text, width):
+ buf.write(line)
+ buf.write(u'\n')
+
+ text = buf.getvalue()
+ buf.close()
+ return text
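
A brief illustrative sketch (not part of the patch): base64_wrap() folds long base64 text into fixed-width lines terminated by '\n', for example with the standard base64 encoding of 'abcdefghijklmnopqrstuvwxyz':

    text = 'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXo='
    print(base64_wrap(text, width=8))
    # YWJjZGVm
    # Z2hpamts
    # bW5vcHFy
    # c3R1dnd4
    # eXo=
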
diff --git a/keystone-moon/keystone/common/cache/__init__.py b/keystone-moon/keystone/common/cache/__init__.py
new file mode 100644
index 00000000..49502399
--- /dev/null
+++ b/keystone-moon/keystone/common/cache/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2013 Metacloud
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common.cache.core import * # noqa
diff --git a/keystone-moon/keystone/common/cache/_memcache_pool.py b/keystone-moon/keystone/common/cache/_memcache_pool.py
new file mode 100644
index 00000000..b15332db
--- /dev/null
+++ b/keystone-moon/keystone/common/cache/_memcache_pool.py
@@ -0,0 +1,233 @@
+# Copyright 2014 Mirantis Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Thread-safe connection pool for python-memcached."""
+
+# NOTE(yorik-sar): this file is copied between keystone and keystonemiddleware
+# and should be kept in sync until we can use external library for this.
+
+import collections
+import contextlib
+import itertools
+import logging
+import threading
+import time
+
+import memcache
+from oslo_log import log
+from six.moves import queue
+
+from keystone import exception
+from keystone.i18n import _
+
+
+LOG = log.getLogger(__name__)
+
+# This 'class' is taken from http://stackoverflow.com/a/22520633/238308
+# Don't inherit client from threading.local so that we can reuse clients in
+# different threads
+_MemcacheClient = type('_MemcacheClient', (object,),
+ dict(memcache.Client.__dict__))
+
+_PoolItem = collections.namedtuple('_PoolItem', ['ttl', 'connection'])
+
+
+class ConnectionPool(queue.Queue):
+ """Base connection pool class
+
+ This class implements the basic connection pool logic as an abstract base
+ class.
+ """
+ def __init__(self, maxsize, unused_timeout, conn_get_timeout=None):
+ """Initialize the connection pool.
+
+ :param maxsize: maximum number of client connections for the pool
+ :type maxsize: int
+ :param unused_timeout: idle time to live for unused clients (in
+ seconds). If a client connection object has been
+ in the pool and idle for longer than the
+ unused_timeout, it will be reaped. This is to
+ ensure resources are released as utilization
+ goes down.
+ :type unused_timeout: int
+ :param conn_get_timeout: maximum time in seconds to wait for a
+ connection. If set to `None` timeout is
+ indefinite.
+ :type conn_get_timeout: int
+ """
+ # super() cannot be used here because Queue in stdlib is an
+ # old-style class
+ queue.Queue.__init__(self, maxsize)
+ self._unused_timeout = unused_timeout
+ self._connection_get_timeout = conn_get_timeout
+ self._acquired = 0
+
+ def _create_connection(self):
+ """Returns a connection instance.
+
+ This is called when the pool needs another instance created.
+
+ :returns: a new connection instance
+
+ """
+ raise NotImplementedError
+
+ def _destroy_connection(self, conn):
+ """Destroy and cleanup a connection instance.
+
+ This is called when the pool wishes to get rid of an existing
+ connection. This is the opportunity for a subclass to free up
+ resources and clean up after itself.
+
+ :param conn: the connection object to destroy
+
+ """
+ raise NotImplementedError
+
+ def _debug_logger(self, msg, *args, **kwargs):
+ if LOG.isEnabledFor(logging.DEBUG):
+ thread_id = threading.current_thread().ident
+ args = (id(self), thread_id) + args
+ prefix = 'Memcached pool %s, thread %s: '
+ LOG.debug(prefix + msg, *args, **kwargs)
+
+ @contextlib.contextmanager
+ def acquire(self):
+ self._debug_logger('Acquiring connection')
+ try:
+ conn = self.get(timeout=self._connection_get_timeout)
+ except queue.Empty:
+ raise exception.UnexpectedError(
+ _('Unable to get a connection from pool id %(id)s after '
+ '%(seconds)s seconds.') %
+ {'id': id(self), 'seconds': self._connection_get_timeout})
+ self._debug_logger('Acquired connection %s', id(conn))
+ try:
+ yield conn
+ finally:
+ self._debug_logger('Releasing connection %s', id(conn))
+ self._drop_expired_connections()
+ try:
+ # super() cannot be used here because Queue in stdlib is an
+ # old-style class
+ queue.Queue.put(self, conn, block=False)
+ except queue.Full:
+ self._debug_logger('Reaping exceeding connection %s', id(conn))
+ self._destroy_connection(conn)
+
+ def _qsize(self):
+ if self.maxsize:
+ return self.maxsize - self._acquired
+ else:
+ # A value indicating there is always a free connection
+ # if maxsize is None or 0
+ return 1
+
+ # NOTE(dstanek): stdlib and eventlet Queue implementations
+ # have different names for the qsize method. This ensures
+ # that we override both of them.
+ if not hasattr(queue.Queue, '_qsize'):
+ qsize = _qsize
+
+ def _get(self):
+ if self.queue:
+ conn = self.queue.pop().connection
+ else:
+ conn = self._create_connection()
+ self._acquired += 1
+ return conn
+
+ def _drop_expired_connections(self):
+ """Drop all expired connections from the right end of the queue."""
+ now = time.time()
+ while self.queue and self.queue[0].ttl < now:
+ conn = self.queue.popleft().connection
+ self._debug_logger('Reaping connection %s', id(conn))
+ self._destroy_connection(conn)
+
+ def _put(self, conn):
+ self.queue.append(_PoolItem(
+ ttl=time.time() + self._unused_timeout,
+ connection=conn,
+ ))
+ self._acquired -= 1
+
+
+class MemcacheClientPool(ConnectionPool):
+ def __init__(self, urls, arguments, **kwargs):
+ # super() cannot be used here because Queue in stdlib is an
+ # old-style class
+ ConnectionPool.__init__(self, **kwargs)
+ self.urls = urls
+ self._arguments = arguments
+ # NOTE(morganfainberg): The host objects expect an int for the
+ # deaduntil value. Initialize this at 0 for each host with 0 indicating
+ # the host is not dead.
+ self._hosts_deaduntil = [0] * len(urls)
+
+ def _create_connection(self):
+ return _MemcacheClient(self.urls, **self._arguments)
+
+ def _destroy_connection(self, conn):
+ conn.disconnect_all()
+
+ def _get(self):
+ # super() cannot be used here because Queue in stdlib is an
+ # old-style class
+ conn = ConnectionPool._get(self)
+ try:
+ # Propagate host state known to us to this client's list
+ now = time.time()
+ for deaduntil, host in zip(self._hosts_deaduntil, conn.servers):
+ if deaduntil > now and host.deaduntil <= now:
+ host.mark_dead('propagating death mark from the pool')
+ host.deaduntil = deaduntil
+ except Exception:
+ # We need to be sure that connection doesn't leak from the pool.
+ # This code runs before we enter context manager's try-finally
+ # block, so we need to explicitly release it here.
+ # super() cannot be used here because Queue in stdlib is an
+ # old-style class
+ ConnectionPool._put(self, conn)
+ raise
+ return conn
+
+ def _put(self, conn):
+ try:
+ # If this client found that one of the hosts is dead, mark it as
+ # such in our internal list
+ now = time.time()
+ for i, host in zip(itertools.count(), conn.servers):
+ deaduntil = self._hosts_deaduntil[i]
+ # Do nothing if we already know this host is dead
+ if deaduntil <= now:
+ if host.deaduntil > now:
+ self._hosts_deaduntil[i] = host.deaduntil
+ self._debug_logger(
+ 'Marked host %s dead until %s',
+ self.urls[i], host.deaduntil)
+ else:
+ self._hosts_deaduntil[i] = 0
+ # If all hosts are dead we should forget that they're dead. This
+ # way we won't get completely shut off until dead_retry seconds
+ # pass, but will be checking servers as frequent as we can (over
+ # way smaller socket_timeout)
+ if all(deaduntil > now for deaduntil in self._hosts_deaduntil):
+ self._debug_logger('All hosts are dead. Marking them as live.')
+ self._hosts_deaduntil[:] = [0] * len(self._hosts_deaduntil)
+ finally:
+ # super() cannot be used here because Queue in stdlib is an
+ # old-style class
+ ConnectionPool._put(self, conn)
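
An illustrative usage sketch (not part of the patch; the server address and values below are made up): MemcacheClientPool hands out pooled python-memcached clients through the acquire() context manager defined on ConnectionPool above.

    from keystone.common.cache import _memcache_pool

    pool = _memcache_pool.MemcacheClientPool(
        urls=['127.0.0.1:11211'],
        arguments={'dead_retry': 300, 'socket_timeout': 3},
        maxsize=10,              # ConnectionPool keyword arguments
        unused_timeout=60,
        conn_get_timeout=10)

    with pool.acquire() as client:   # waits up to conn_get_timeout seconds
        client.set('some-key', 'some-value')
        value = client.get('some-key')
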
diff --git a/keystone-moon/keystone/common/cache/backends/__init__.py b/keystone-moon/keystone/common/cache/backends/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/common/cache/backends/__init__.py
diff --git a/keystone-moon/keystone/common/cache/backends/memcache_pool.py b/keystone-moon/keystone/common/cache/backends/memcache_pool.py
new file mode 100644
index 00000000..f3990b12
--- /dev/null
+++ b/keystone-moon/keystone/common/cache/backends/memcache_pool.py
@@ -0,0 +1,61 @@
+# Copyright 2014 Mirantis Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""dogpile.cache backend that uses Memcached connection pool"""
+
+import functools
+import logging
+
+from dogpile.cache.backends import memcached as memcached_backend
+
+from keystone.common.cache import _memcache_pool
+
+
+LOG = logging.getLogger(__name__)
+
+
+# Helper to ease backend refactoring
+class ClientProxy(object):
+ def __init__(self, client_pool):
+ self.client_pool = client_pool
+
+ def _run_method(self, __name, *args, **kwargs):
+ with self.client_pool.acquire() as client:
+ return getattr(client, __name)(*args, **kwargs)
+
+ def __getattr__(self, name):
+ return functools.partial(self._run_method, name)
+
+
+class PooledMemcachedBackend(memcached_backend.MemcachedBackend):
+ # Composed from GenericMemcachedBackend's and MemcacheArgs's __init__
+ def __init__(self, arguments):
+ super(PooledMemcachedBackend, self).__init__(arguments)
+ self.client_pool = _memcache_pool.MemcacheClientPool(
+ self.url,
+ arguments={
+ 'dead_retry': arguments.get('dead_retry', 5 * 60),
+ 'socket_timeout': arguments.get('socket_timeout', 3),
+ },
+ maxsize=arguments.get('pool_maxsize', 10),
+ unused_timeout=arguments.get('pool_unused_timeout', 60),
+ conn_get_timeout=arguments.get('pool_connection_get_timeout', 10),
+ )
+
+ # Since all methods in the backend just call one of the client's methods,
+ # this lets us avoid the need to hack it too much
+ @property
+ def client(self):
+ return ClientProxy(self.client_pool)
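
An illustrative dogpile.cache configuration sketch (not part of the patch; values are made up): keystone/common/cache/core.py later in this commit registers this backend under the name 'keystone.cache.memcache_pool', so once that module has been imported a region can be configured roughly like this:

    from dogpile.cache import make_region

    import keystone.common.cache  # runs the register_backend() calls in core.py

    region = make_region().configure(
        'keystone.cache.memcache_pool',
        expiration_time=600,
        arguments={
            'url': ['127.0.0.1:11211'],
            'pool_maxsize': 10,
            'pool_unused_timeout': 60,
            'pool_connection_get_timeout': 10,
        })
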
diff --git a/keystone-moon/keystone/common/cache/backends/mongo.py b/keystone-moon/keystone/common/cache/backends/mongo.py
new file mode 100644
index 00000000..b5de9bc4
--- /dev/null
+++ b/keystone-moon/keystone/common/cache/backends/mongo.py
@@ -0,0 +1,557 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+import datetime
+
+from dogpile.cache import api
+from dogpile.cache import util as dp_util
+from oslo_log import log
+from oslo_utils import importutils
+from oslo_utils import timeutils
+import six
+
+from keystone import exception
+from keystone.i18n import _, _LW
+
+
+NO_VALUE = api.NO_VALUE
+LOG = log.getLogger(__name__)
+
+
+class MongoCacheBackend(api.CacheBackend):
+ """A MongoDB based caching backend implementing dogpile backend APIs.
+
+ Arguments accepted in the arguments dictionary:
+
+ :param db_hosts: string (required), hostname or IP address of the
+ MongoDB server instance. This can be a single MongoDB connection URI,
+ or a list of MongoDB connection URIs.
+
+ :param db_name: string (required), the name of the database to be used.
+
+ :param cache_collection: string (required), the name of the collection used
+ to store cached data.
+ *Note:* A different collection name can be provided if there is a need to
+ create a separate container (i.e. collection) for cache data, so region
+ configuration is done per collection.
+
+ Following are optional parameters for MongoDB backend configuration,
+
+ :param username: string, the name of the user to authenticate.
+
+ :param password: string, the password of the user to authenticate.
+
+ :param max_pool_size: integer, the maximum number of connections that the
+ pool will open simultaneously. By default the pool size is 10.
+
+ :param w: integer, write acknowledgement for MongoDB client
+
+ If not provided, then no default is set on MongoDB and then write
+ acknowledgement behavior occurs as per MongoDB default. This parameter
+ name is the same as the one used in the MongoDB docs. This value is
+ specified at the collection level, so it applies to `cache_collection`
+ db write operations.
+
+ If this is a replica set, write operations will block until they have
+ been replicated to the specified number or tagged set of servers.
+ Setting w=0 disables write acknowledgement and all other write concern
+ options.
+
+ :param read_preference: string, the read preference mode for the MongoDB
+ client. Expected values are ``primary``, ``primaryPreferred``,
+ ``secondary``, ``secondaryPreferred``, or ``nearest``. This read_preference
+ is specified at the collection level, so it applies to `cache_collection`
+ db read operations.
+
+ :param use_replica: boolean, flag indicating whether a replica-set client is
+ to be used. Default is `False`. A `replicaset_name` value is required if
+ `True`.
+
+ :param replicaset_name: string, name of replica set.
+ Becomes required if `use_replica` is `True`
+
+ :param son_manipulator: string, name of class with module name which
+ implements MongoDB SONManipulator.
+ Default manipulator used is :class:`.BaseTransform`.
+
+ This manipulator is added per database. In multiple cache
+ configurations, the manipulator name should be same if same
+ database name ``db_name`` is used in those configurations.
+
+ SONManipulator is used to manipulate custom data types as they are
+ saved or retrieved from MongoDB. Custom impl is only needed if cached
+ data is custom class and needs transformations when saving or reading
+ from db. If dogpile cached value contains built-in data types, then
+ BaseTransform class is sufficient as it already handles dogpile
+ CachedValue class transformation.
+
+ :param mongo_ttl_seconds: integer, interval in seconds to indicate maximum
+ time-to-live value.
+ If the value is greater than 0, then it is assumed that cache_collection
+ needs to be a TTL type (has an index on the 'doc_date' field).
+ By default, the value is -1 and TTL behavior is disabled.
+ Reference: <http://docs.mongodb.org/manual/tutorial/expire-data/>
+
+ .. NOTE::
+
+ This parameter is different from Dogpile own
+ expiration_time, which is the number of seconds after which Dogpile
+ will consider the value to be expired. When Dogpile considers a
+ value to be expired, it continues to use the value until generation
+ of a new value is complete, when using CacheRegion.get_or_create().
+ Therefore, if you are setting `mongo_ttl_seconds`, you will want to
+ make sure it is greater than expiration_time by at least enough
+ seconds for new values to be generated, else the value would not
+ be available during a regeneration, forcing all threads to wait for
+ a regeneration each time a value expires.
+
+ :param ssl: boolean, If True, create the connection to the server
+ using SSL. Default is `False`. Client SSL connection parameters depend
+ on the server-side SSL setup. For further reference on SSL configuration:
+ <http://docs.mongodb.org/manual/tutorial/configure-ssl/>
+
+ :param ssl_keyfile: string, the private keyfile used to identify the
+ local connection against mongod. If included with the certfile then
+ only the `ssl_certfile` is needed. Used only when `ssl` is `True`.
+
+ :param ssl_certfile: string, the certificate file used to identify the
+ local connection against mongod. Used only when `ssl` is `True`.
+
+ :param ssl_ca_certs: string, the ca_certs file contains a set of
+ concatenated 'certification authority' certificates, which are used to
+ validate certificates passed from the other end of the connection.
+ Used only when `ssl` is `True`.
+
+ :param ssl_cert_reqs: string, the parameter cert_reqs specifies whether
+ a certificate is required from the other side of the connection, and
+ whether it will be validated if provided. It must be one of the three
+ values ``ssl.CERT_NONE`` (certificates ignored), ``ssl.CERT_OPTIONAL``
+ (not required, but validated if provided), or
+ ``ssl.CERT_REQUIRED`` (required and validated). If the value of this
+ parameter is not ``ssl.CERT_NONE``, then the ssl_ca_certs parameter
+ must point to a file of CA certificates. Used only when `ssl`
+ is `True`.
+
+ The rest of the arguments are passed to the mongo calls for read, write and
+ remove, so options relevant to those operations can be specified here.
+
+ Further details on the various supported arguments can be found at
+ <http://api.mongodb.org/python/current/api/pymongo/>
+
+ """
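# Editor's note -- an illustrative configuration sketch, not part of the
# original commit. keystone/common/cache/core.py (later in this patch)
# registers this backend under the dogpile name 'keystone.cache.mongo'; the
# host, database and collection names below are made up:
#
#     region.configure(
#         'keystone.cache.mongo',
#         expiration_time=600,
#         arguments={
#             'db_hosts': 'localhost:27017',
#             'db_name': 'ks_cache',
#             'cache_collection': 'cache',
#             'mongo_ttl_seconds': 300,
#             'w': 1,
#         })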
+
+ def __init__(self, arguments):
+ self.api = MongoApi(arguments)
+
+ @dp_util.memoized_property
+ def client(self):
+ """Initializes MongoDB connection and collection defaults.
+
+ This initialization is done only once and performed as part of lazy
+ inclusion of MongoDB dependency i.e. add imports only if related
+ backend is used.
+
+ :return: :class:`.MongoApi` instance
+ """
+ self.api.get_cache_collection()
+ return self.api
+
+ def get(self, key):
+ value = self.client.get(key)
+ if value is None:
+ return NO_VALUE
+ else:
+ return value
+
+ def get_multi(self, keys):
+ values = self.client.get_multi(keys)
+ return [
+ NO_VALUE if key not in values
+ else values[key] for key in keys
+ ]
+
+ def set(self, key, value):
+ self.client.set(key, value)
+
+ def set_multi(self, mapping):
+ self.client.set_multi(mapping)
+
+ def delete(self, key):
+ self.client.delete(key)
+
+ def delete_multi(self, keys):
+ self.client.delete_multi(keys)
+
+
+class MongoApi(object):
+ """Class handling MongoDB specific functionality.
+
+ This class uses PyMongo APIs internally to create a database connection with
+ the configured pool size, ensure a unique index on the key, perform database
+ authentication and ensure a TTL collection index if so configured.
+ This class also serves as the handle to the cache collection for dogpile
+ cache APIs.
+
+ In a single deployment, multiple cache configurations can be defined. When
+ multiple cache collections are used, the db client connection pool is shared
+ as long as the cache collections are within the same database.
+ """
+
+ # class level attributes for re-use of db client connection and collection
+ _DB = {} # dict of db_name: db connection reference
+ _MONGO_COLLS = {} # dict of cache_collection : db collection reference
+
+ def __init__(self, arguments):
+ self._init_args(arguments)
+ self._data_manipulator = None
+
+ def _init_args(self, arguments):
+ """Helper logic for collecting and parsing MongoDB specific arguments.
+
+ The arguments passed in are separated out into connection-specific settings;
+ the rest of the arguments are passed on to create/update/delete
+ db operations.
+ """
+ self.conn_kwargs = {} # connection specific arguments
+
+ self.hosts = arguments.pop('db_hosts', None)
+ if self.hosts is None:
+ msg = _('db_hosts value is required')
+ raise exception.ValidationError(message=msg)
+
+ self.db_name = arguments.pop('db_name', None)
+ if self.db_name is None:
+ msg = _('database db_name is required')
+ raise exception.ValidationError(message=msg)
+
+ self.cache_collection = arguments.pop('cache_collection', None)
+ if self.cache_collection is None:
+ msg = _('cache_collection name is required')
+ raise exception.ValidationError(message=msg)
+
+ self.username = arguments.pop('username', None)
+ self.password = arguments.pop('password', None)
+ self.max_pool_size = arguments.pop('max_pool_size', 10)
+
+ self.w = arguments.pop('w', -1)
+ try:
+ self.w = int(self.w)
+ except ValueError:
+ msg = _('integer value expected for w (write concern attribute)')
+ raise exception.ValidationError(message=msg)
+
+ self.read_preference = arguments.pop('read_preference', None)
+
+ self.use_replica = arguments.pop('use_replica', False)
+ if self.use_replica:
+ if arguments.get('replicaset_name') is None:
+ msg = _('replicaset_name required when use_replica is True')
+ raise exception.ValidationError(message=msg)
+ self.replicaset_name = arguments.get('replicaset_name')
+
+ self.son_manipulator = arguments.pop('son_manipulator', None)
+
+ # set if mongo collection needs to be TTL type.
+ # This needs to be max ttl for any cache entry.
+ # By default, -1 means don't use TTL collection.
+ # With ttl set, it creates related index and have doc_date field with
+ # needed expiration interval
+ self.ttl_seconds = arguments.pop('mongo_ttl_seconds', -1)
+ try:
+ self.ttl_seconds = int(self.ttl_seconds)
+ except ValueError:
+ msg = _('integer value expected for mongo_ttl_seconds')
+ raise exception.ValidationError(message=msg)
+
+ self.conn_kwargs['ssl'] = arguments.pop('ssl', False)
+ if self.conn_kwargs['ssl']:
+ ssl_keyfile = arguments.pop('ssl_keyfile', None)
+ ssl_certfile = arguments.pop('ssl_certfile', None)
+ ssl_ca_certs = arguments.pop('ssl_ca_certs', None)
+ ssl_cert_reqs = arguments.pop('ssl_cert_reqs', None)
+ if ssl_keyfile:
+ self.conn_kwargs['ssl_keyfile'] = ssl_keyfile
+ if ssl_certfile:
+ self.conn_kwargs['ssl_certfile'] = ssl_certfile
+ if ssl_ca_certs:
+ self.conn_kwargs['ssl_ca_certs'] = ssl_ca_certs
+ if ssl_cert_reqs:
+ self.conn_kwargs['ssl_cert_reqs'] = (
+ self._ssl_cert_req_type(ssl_cert_reqs))
+
+ # rest of arguments are passed to mongo crud calls
+ self.meth_kwargs = arguments
+
+ def _ssl_cert_req_type(self, req_type):
+ try:
+ import ssl
+ except ImportError:
+ raise exception.ValidationError(_('no ssl support available'))
+ req_type = req_type.upper()
+ try:
+ return {
+ 'NONE': ssl.CERT_NONE,
+ 'OPTIONAL': ssl.CERT_OPTIONAL,
+ 'REQUIRED': ssl.CERT_REQUIRED
+ }[req_type]
+ except KeyError:
+ msg = _('Invalid ssl_cert_reqs value of %s, must be one of '
+ '"NONE", "OPTIONAL", "REQUIRED"') % (req_type)
+ raise exception.ValidationError(message=msg)
+
+ def _get_db(self):
+ # defer imports until backend is used
+ global pymongo
+ import pymongo
+ if self.use_replica:
+ connection = pymongo.MongoReplicaSetClient(
+ host=self.hosts, replicaSet=self.replicaset_name,
+ max_pool_size=self.max_pool_size, **self.conn_kwargs)
+ else: # used for standalone node or mongos in sharded setup
+ connection = pymongo.MongoClient(
+ host=self.hosts, max_pool_size=self.max_pool_size,
+ **self.conn_kwargs)
+
+ database = getattr(connection, self.db_name)
+
+ self._assign_data_mainpulator()
+ database.add_son_manipulator(self._data_manipulator)
+ if self.username and self.password:
+ database.authenticate(self.username, self.password)
+ return database
+
+ def _assign_data_mainpulator(self):
+ if self._data_manipulator is None:
+ if self.son_manipulator:
+ self._data_manipulator = importutils.import_object(
+ self.son_manipulator)
+ else:
+ self._data_manipulator = BaseTransform()
+
+ def _get_doc_date(self):
+ if self.ttl_seconds > 0:
+ expire_delta = datetime.timedelta(seconds=self.ttl_seconds)
+ doc_date = timeutils.utcnow() + expire_delta
+ else:
+ doc_date = timeutils.utcnow()
+ return doc_date
+
+ def get_cache_collection(self):
+ if self.cache_collection not in self._MONGO_COLLS:
+ global pymongo
+ import pymongo
+ # re-use db client connection if already defined as part of
+ # earlier dogpile cache configuration
+ if self.db_name not in self._DB:
+ self._DB[self.db_name] = self._get_db()
+ coll = getattr(self._DB[self.db_name], self.cache_collection)
+
+ self._assign_data_mainpulator()
+ if self.read_preference:
+ self.read_preference = pymongo.read_preferences.mongos_enum(
+ self.read_preference)
+ coll.read_preference = self.read_preference
+ if self.w > -1:
+ coll.write_concern['w'] = self.w
+ if self.ttl_seconds > 0:
+ kwargs = {'expireAfterSeconds': self.ttl_seconds}
+ coll.ensure_index('doc_date', cache_for=5, **kwargs)
+ else:
+ self._validate_ttl_index(coll, self.cache_collection,
+ self.ttl_seconds)
+ self._MONGO_COLLS[self.cache_collection] = coll
+
+ return self._MONGO_COLLS[self.cache_collection]
+
+ def _get_cache_entry(self, key, value, meta, doc_date):
+ """MongoDB cache data representation.
+
+ The cache key is stored in the ``_id`` field, as MongoDB creates a unique
+ index on this field by default, so there is no need for a separate field
+ and index to store the cache key. Cache data has an additional ``doc_date``
+ field for MongoDB TTL collection support.
+ """
+ return dict(_id=key, value=value, meta=meta, doc_date=doc_date)
+
+ def _validate_ttl_index(self, collection, coll_name, ttl_seconds):
+ """Check whether an existing TTL index conflicts with the new config.
+
+ This logs a warning when the existing collection has a TTL index defined
+ and the new cache configuration tries to disable that index with
+ ``mongo_ttl_seconds < 0``. In that case, the existing index needs to be
+ dropped first for the new configuration to take effect.
+ Refer to MongoDB documentation around TTL index for further details.
+ """
+ indexes = collection.index_information()
+ for indx_name, index_data in six.iteritems(indexes):
+ if all(k in index_data for k in ('key', 'expireAfterSeconds')):
+ existing_value = index_data['expireAfterSeconds']
+ fld_present = 'doc_date' in index_data['key'][0]
+ if fld_present and existing_value > -1 and ttl_seconds < 1:
+ msg = _LW('TTL index already exists on db collection '
+ '<%(c_name)s>, remove index <%(indx_name)s> '
+ 'first to make updated mongo_ttl_seconds value '
+ 'to be effective')
+ LOG.warn(msg, {'c_name': coll_name,
+ 'indx_name': indx_name})
+
+ def get(self, key):
+ criteria = {'_id': key}
+ result = self.get_cache_collection().find_one(spec_or_id=criteria,
+ **self.meth_kwargs)
+ if result:
+ return result['value']
+ else:
+ return None
+
+ def get_multi(self, keys):
+ db_results = self._get_results_as_dict(keys)
+ return {doc['_id']: doc['value'] for doc in six.itervalues(db_results)}
+
+ def _get_results_as_dict(self, keys):
+ criteria = {'_id': {'$in': keys}}
+ db_results = self.get_cache_collection().find(spec=criteria,
+ **self.meth_kwargs)
+ return {doc['_id']: doc for doc in db_results}
+
+ def set(self, key, value):
+ doc_date = self._get_doc_date()
+ ref = self._get_cache_entry(key, value.payload, value.metadata,
+ doc_date)
+ spec = {'_id': key}
+ # find and modify does not have manipulator support
+ # so need to do conversion as part of input document
+ ref = self._data_manipulator.transform_incoming(ref, self)
+ self.get_cache_collection().find_and_modify(spec, ref, upsert=True,
+ **self.meth_kwargs)
+
+ def set_multi(self, mapping):
+ """Insert multiple documents specified as key, value pairs.
+
+ Documents that do not already exist are added with a single insert;
+ updates of multiple existing documents are done one by one.
+ """
+ doc_date = self._get_doc_date()
+ insert_refs = []
+ update_refs = []
+ existing_docs = self._get_results_as_dict(mapping.keys())
+ for key, value in mapping.items():
+ ref = self._get_cache_entry(key, value.payload, value.metadata,
+ doc_date)
+ if key in existing_docs:
+ ref['_id'] = existing_docs[key]['_id']
+ update_refs.append(ref)
+ else:
+ insert_refs.append(ref)
+ if insert_refs:
+ self.get_cache_collection().insert(insert_refs, manipulate=True,
+ **self.meth_kwargs)
+ for upd_doc in update_refs:
+ self.get_cache_collection().save(upd_doc, manipulate=True,
+ **self.meth_kwargs)
+
+ def delete(self, key):
+ criteria = {'_id': key}
+ self.get_cache_collection().remove(spec_or_id=criteria,
+ **self.meth_kwargs)
+
+ def delete_multi(self, keys):
+ criteria = {'_id': {'$in': keys}}
+ self.get_cache_collection().remove(spec_or_id=criteria,
+ **self.meth_kwargs)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class AbstractManipulator(object):
+ """Abstract class with methods which need to be implemented for custom
+ manipulation.
+
+ Adding this as a base class for :class:`.BaseTransform` instead of adding
+ import dependency of pymongo specific class i.e.
+ `pymongo.son_manipulator.SONManipulator` and using that as base class.
+ This is done to avoid pymongo dependency if MongoDB backend is not used.
+ """
+ @abc.abstractmethod
+ def transform_incoming(self, son, collection):
+ """Used while saving data to MongoDB.
+
+ :param son: the SON object to be inserted into the database
+ :param collection: the collection the object is being inserted into
+
+ :returns: transformed SON object
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def transform_outgoing(self, son, collection):
+ """Used while reading data from MongoDB.
+
+ :param son: the SON object being retrieved from the database
+ :param collection: the collection this object was stored in
+
+ :returns: transformed SON object
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ def will_copy(self):
+ """Will this SON manipulator make a copy of the incoming document?
+
+ Derived classes that do need to make a copy should override this
+ method, returning `True` instead of `False`.
+
+ :returns: boolean
+ """
+ return False
+
+
+class BaseTransform(AbstractManipulator):
+ """Base transformation class to store and read dogpile cached data
+ from MongoDB.
+
+ This is needed as dogpile internally stores data as a custom class,
+ i.e. dogpile.cache.api.CachedValue.
+
+ Note: A custom manipulator always needs to override the
+ ``transform_incoming`` and ``transform_outgoing`` methods. The MongoDB
+ manipulator logic specifically checks that the overridden methods of the
+ instance and of its super class are different.
+ """
+
+ def transform_incoming(self, son, collection):
+ """Used while saving data to MongoDB."""
+ for (key, value) in son.items():
+ if isinstance(value, api.CachedValue):
+ son[key] = value.payload # key is 'value' field here
+ son['meta'] = value.metadata
+ elif isinstance(value, dict): # Make sure we recurse into sub-docs
+ son[key] = self.transform_incoming(value, collection)
+ return son
+
+ def transform_outgoing(self, son, collection):
+ """Used while reading data from MongoDB."""
+ metadata = None
+ # make sure its top level dictionary with all expected fields names
+ # present
+ if isinstance(son, dict) and all(k in son for k in
+ ('_id', 'value', 'meta', 'doc_date')):
+ payload = son.pop('value', None)
+ metadata = son.pop('meta', None)
+ for (key, value) in son.items():
+ if isinstance(value, dict):
+ son[key] = self.transform_outgoing(value, collection)
+ if metadata is not None:
+ son['value'] = api.CachedValue(payload, metadata)
+ return son
diff --git a/keystone-moon/keystone/common/cache/backends/noop.py b/keystone-moon/keystone/common/cache/backends/noop.py
new file mode 100644
index 00000000..38329c94
--- /dev/null
+++ b/keystone-moon/keystone/common/cache/backends/noop.py
@@ -0,0 +1,49 @@
+# Copyright 2013 Metacloud
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from dogpile.cache import api
+
+
+NO_VALUE = api.NO_VALUE
+
+
+class NoopCacheBackend(api.CacheBackend):
+ """A no-op backend used as the default caching backend.
+
+ The no-op backend is provided as the default caching backend for keystone
+ to ensure that ``dogpile.cache.memory`` is not used in any real-world
+ circumstances unintentionally. ``dogpile.cache.memory`` does not have a
+ mechanism to clean up its internal dict and therefore could cause runaway
+ memory utilization.
+ """
+ def __init__(self, *args):
+ return
+
+ def get(self, key):
+ return NO_VALUE
+
+ def get_multi(self, keys):
+ return [NO_VALUE for x in keys]
+
+ def set(self, key, value):
+ return
+
+ def set_multi(self, mapping):
+ return
+
+ def delete(self, key):
+ return
+
+ def delete_multi(self, keys):
+ return
diff --git a/keystone-moon/keystone/common/cache/core.py b/keystone-moon/keystone/common/cache/core.py
new file mode 100644
index 00000000..306587b3
--- /dev/null
+++ b/keystone-moon/keystone/common/cache/core.py
@@ -0,0 +1,308 @@
+# Copyright 2013 Metacloud
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Keystone Caching Layer Implementation."""
+
+import dogpile.cache
+from dogpile.cache import proxy
+from dogpile.cache import util
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import importutils
+
+from keystone import exception
+from keystone.i18n import _, _LE
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+make_region = dogpile.cache.make_region
+
+dogpile.cache.register_backend(
+ 'keystone.common.cache.noop',
+ 'keystone.common.cache.backends.noop',
+ 'NoopCacheBackend')
+
+dogpile.cache.register_backend(
+ 'keystone.cache.mongo',
+ 'keystone.common.cache.backends.mongo',
+ 'MongoCacheBackend')
+
+dogpile.cache.register_backend(
+ 'keystone.cache.memcache_pool',
+ 'keystone.common.cache.backends.memcache_pool',
+ 'PooledMemcachedBackend')
+
+
+class DebugProxy(proxy.ProxyBackend):
+ """Extra Logging ProxyBackend."""
+ # NOTE(morganfainberg): Pass all key/values through repr to ensure we have
+ # a clean description of the information. Without use of repr, it might
+ # be possible to run into encode/decode error(s). For logging/debugging
+ # purposes encode/decode is irrelevant and we should be looking at the
+ # data exactly as it stands.
+
+ def get(self, key):
+ value = self.proxied.get(key)
+ LOG.debug('CACHE_GET: Key: "%(key)r" Value: "%(value)r"',
+ {'key': key, 'value': value})
+ return value
+
+ def get_multi(self, keys):
+ values = self.proxied.get_multi(keys)
+ LOG.debug('CACHE_GET_MULTI: "%(keys)r" Values: "%(values)r"',
+ {'keys': keys, 'values': values})
+ return values
+
+ def set(self, key, value):
+ LOG.debug('CACHE_SET: Key: "%(key)r" Value: "%(value)r"',
+ {'key': key, 'value': value})
+ return self.proxied.set(key, value)
+
+ def set_multi(self, mapping):
+ LOG.debug('CACHE_SET_MULTI: "%r"', mapping)
+ self.proxied.set_multi(mapping)
+
+ def delete(self, key):
+ self.proxied.delete(key)
+ LOG.debug('CACHE_DELETE: "%r"', key)
+
+ def delete_multi(self, keys):
+ LOG.debug('CACHE_DELETE_MULTI: "%r"', keys)
+ self.proxied.delete_multi(keys)
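+
+# Note (added for context, not in the original module): DebugProxy is only
+# wrapped around the cache region when [cache] debug_cache_backend is set to
+# true; see configure_cache_region() below.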
+
+
+def build_cache_config():
+ """Build the cache region dictionary configuration.
+
+ :returns: dict
+ """
+ prefix = CONF.cache.config_prefix
+ conf_dict = {}
+ conf_dict['%s.backend' % prefix] = CONF.cache.backend
+ conf_dict['%s.expiration_time' % prefix] = CONF.cache.expiration_time
+ for argument in CONF.cache.backend_argument:
+ try:
+ (argname, argvalue) = argument.split(':', 1)
+ except ValueError:
+ msg = _LE('Unable to build cache config-key. Expected format '
+ '"<argname>:<value>". Skipping unknown format: %s')
+ LOG.error(msg, argument)
+ continue
+
+ arg_key = '.'.join([prefix, 'arguments', argname])
+ conf_dict[arg_key] = argvalue
+
+ LOG.debug('Keystone Cache Config: %s', conf_dict)
+ # NOTE(yorik-sar): these arguments will be used for memcache-related
+ # backends. Use setdefault for url to support old-style setting through
+ # backend_argument=url:127.0.0.1:11211
+ conf_dict.setdefault('%s.arguments.url' % prefix,
+ CONF.cache.memcache_servers)
+ for arg in ('dead_retry', 'socket_timeout', 'pool_maxsize',
+ 'pool_unused_timeout', 'pool_connection_get_timeout'):
+ value = getattr(CONF.cache, 'memcache_' + arg)
+ conf_dict['%s.arguments.%s' % (prefix, arg)] = value
+
+ return conf_dict
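+
+# Illustrative example (not part of the original module): with the default
+# option values defined in keystone.common.config, build_cache_config() would
+# return a dict roughly like
+#   {'cache.keystone.backend': 'keystone.common.cache.noop',
+#    'cache.keystone.expiration_time': 600,
+#    'cache.keystone.arguments.url': ['localhost:11211'],
+#    'cache.keystone.arguments.dead_retry': 300, ...}
+# The exact keys and values depend on the deployment's [cache] settings.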
+
+
+def configure_cache_region(region):
+ """Configure a cache region.
+
+ :param region: the CacheRegion object to configure; it must be an
+ instance of dogpile.cache.CacheRegion
+ :raises: exception.ValidationError if ``region`` is not a CacheRegion
+ :returns: dogpile.cache.CacheRegion
+ """
+ if not isinstance(region, dogpile.cache.CacheRegion):
+ raise exception.ValidationError(
+ _('region not type dogpile.cache.CacheRegion'))
+
+ if not region.is_configured:
+ # NOTE(morganfainberg): this is how you tell if a region is configured.
+ # There is a request logged with dogpile.cache upstream to make this
+ # easier / less ugly.
+
+ config_dict = build_cache_config()
+ region.configure_from_config(config_dict,
+ '%s.' % CONF.cache.config_prefix)
+
+ if CONF.cache.debug_cache_backend:
+ region.wrap(DebugProxy)
+
+ # NOTE(morganfainberg): if the backend requests the use of a
+ # key_mangler, we should respect that key_mangler function. If a
+ # key_mangler is not defined by the backend, use the sha1_mangle_key
+ # mangler provided by dogpile.cache. This ensures we always use a fixed
+ # size cache-key.
+ if region.key_mangler is None:
+ region.key_mangler = util.sha1_mangle_key
+
+ for class_path in CONF.cache.proxies:
+ # NOTE(morganfainberg): if we have any proxy wrappers, we should
+ # ensure they are added to the cache region's backend. Since
+ # configure_from_config doesn't handle the wrap argument, we need
+ # to manually add the Proxies. For information on how the
+ # ProxyBackends work, see the dogpile.cache documents on
+ # "changing-backend-behavior"
+ cls = importutils.import_class(class_path)
+ LOG.debug("Adding cache-proxy '%s' to backend.", class_path)
+ region.wrap(cls)
+
+ return region
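+
+# Minimal usage sketch (illustrative, not part of the original module):
+#
+#   region = make_region()
+#   configure_cache_region(region)  # applies the [cache] options from CONF
+#
+# The module-level REGION defined later in this file is a natural candidate
+# to pass through this helper during service startup.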
+
+
+def get_should_cache_fn(section):
+ """Build a function that returns a config section's caching status.
+
+ For any given driver in keystone that has caching capabilities, a boolean
+ config option for that driver's section (e.g. ``token``) should exist and
+ default to ``True``. This function will use that value to tell the caching
+ decorator if caching for that driver is enabled. To properly use this
+ with the decorator, pass this function the configuration section and assign
+ the result to a variable. Pass the new variable to the caching decorator
+ as the named argument ``should_cache_fn``. e.g.::
+
+ from keystone.common import cache
+
+ SHOULD_CACHE = cache.get_should_cache_fn('token')
+
+ @cache.on_arguments(should_cache_fn=SHOULD_CACHE)
+ def function(arg1, arg2):
+ ...
+
+ :param section: name of the configuration section to examine
+ :type section: string
+ :returns: function reference
+ """
+ def should_cache(value):
+ if not CONF.cache.enabled:
+ return False
+ conf_group = getattr(CONF, section)
+ return getattr(conf_group, 'caching', True)
+ return should_cache
+
+
+def get_expiration_time_fn(section):
+ """Build a function that returns a config section's cache expiration time.
+
+ For any given driver in keystone that has caching capabilities, an int
+ config option called ``cache_time`` for that driver's section
+ (e.g. ``token``) should exist and typically default to ``None``. This
+ function will use that value to tell the caching decorator of the TTL
+ override for caching the resulting objects. If the value of the config
+ option is ``None`` the default value provided in the
+ ``[cache] expiration_time`` option will be used by the decorator. The
+ default may be set to something other than ``None`` in cases where the
+ caching TTL should not be tied to the global default(s) (e.g.
+ revocation_list changes very infrequently and can be cached for >1h by
+ default).
+
+ To properly use this with the decorator, pass this function the
+ configuration section and assign the result to a variable. Pass the new
+ variable to the caching decorator as the named argument
+ ``expiration_time``. e.g.::
+
+ from keystone.common import cache
+
+ EXPIRATION_TIME = cache.get_expiration_time_fn('token')
+
+ @cache.on_arguments(expiration_time=EXPIRATION_TIME)
+ def function(arg1, arg2):
+ ...
+
+ :param section: name of the configuration section to examine
+ :type section: string
+ :rtype: function reference
+ """
+ def get_expiration_time():
+ conf_group = getattr(CONF, section)
+ return getattr(conf_group, 'cache_time', None)
+ return get_expiration_time
+
+
+def key_generate_to_str(s):
+ # NOTE(morganfainberg): Since we need to stringify all arguments, attempt
+ # to stringify and handle the Unicode error explicitly as needed.
+ try:
+ return str(s)
+ except UnicodeEncodeError:
+ return s.encode('utf-8')
+
+
+def function_key_generator(namespace, fn, to_str=key_generate_to_str):
+ # NOTE(morganfainberg): This wraps dogpile.cache's default
+ # function_key_generator to change the default to_str mechanism.
+ return util.function_key_generator(namespace, fn, to_str=to_str)
+
+
+REGION = dogpile.cache.make_region(
+ function_key_generator=function_key_generator)
+on_arguments = REGION.cache_on_arguments
+
+
+def get_memoization_decorator(section, expiration_section=None):
+ """Build a function based on the `on_arguments` decorator for the section.
+
+ For any given driver in Keystone that has caching capabilities, a
+ pair of functions is required to properly determine the status of the
+ caching capabilities (a toggle to indicate caching is enabled and any
+ override of the default TTL for cached data). This function will return
+ an object that has the memoization decorator ``on_arguments``
+ pre-configured for the driver.
+
+ Example usage::
+
+ from keystone.common import cache
+
+ MEMOIZE = cache.get_memoization_decorator(section='token')
+
+ @MEMOIZE
+ def function(arg1, arg2):
+ ...
+
+
+ ALTERNATE_MEMOIZE = cache.get_memoization_decorator(
+ section='token', expiration_section='revoke')
+
+ @ALTERNATE_MEMOIZE
+ def function2(arg1, arg2):
+ ...
+
+ :param section: name of the configuration section to examine
+ :type section: string
+ :param expiration_section: name of the configuration section to examine
+ for the expiration option. This will fall back
+ to using ``section`` if the value is unspecified
+ or ``None``
+ :type expiration_section: string
+ :rtype: function reference
+ """
+ if expiration_section is None:
+ expiration_section = section
+ should_cache = get_should_cache_fn(section)
+ expiration_time = get_expiration_time_fn(expiration_section)
+
+ memoize = REGION.cache_on_arguments(should_cache_fn=should_cache,
+ expiration_time=expiration_time)
+
+ # Expose the actual "should_cache" and "expiration_time" functions on the
+ # returned decorator. This is potentially useful for callers that want to
+ # pre-seed cache values.
+ memoize.should_cache = should_cache
+ memoize.get_expiration_time = expiration_time
+
+ return memoize
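+
+# Illustrative (not part of the original module): given
+#   MEMOIZE = get_memoization_decorator(section='token')
+# the helpers attached above behave as follows:
+#   MEMOIZE.should_cache(value)    # True only if [cache] enabled and
+#                                  # [token] caching are both true
+#   MEMOIZE.get_expiration_time()  # [token] cache_time, or None to fall back
+#                                  # to the global [cache] expiration_time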
diff --git a/keystone-moon/keystone/common/config.py b/keystone-moon/keystone/common/config.py
new file mode 100644
index 00000000..bcaedeef
--- /dev/null
+++ b/keystone-moon/keystone/common/config.py
@@ -0,0 +1,1118 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+import oslo_messaging
+
+
+_DEFAULT_AUTH_METHODS = ['external', 'password', 'token', 'oauth1']
+_CERTFILE = '/etc/keystone/ssl/certs/signing_cert.pem'
+_KEYFILE = '/etc/keystone/ssl/private/signing_key.pem'
+_SSO_CALLBACK = '/etc/keystone/sso_callback_template.html'
+
+
+FILE_OPTIONS = {
+ None: [
+ cfg.StrOpt('admin_token', secret=True, default='ADMIN',
+ help='A "shared secret" that can be used to bootstrap '
+ 'Keystone. This "token" does not represent a user, '
+ 'and carries no explicit authorization. To disable '
+ 'in production (highly recommended), remove '
+ 'AdminTokenAuthMiddleware from your paste '
+ 'application pipelines (for example, in '
+ 'keystone-paste.ini).'),
+ cfg.IntOpt('compute_port', default=8774,
+ help='(Deprecated) The port which the OpenStack Compute '
+ 'service listens on. This option was only used for '
+ 'string replacement in the templated catalog backend. '
+ 'Templated catalogs should replace the '
+ '"$(compute_port)s" substitution with the static port '
+ 'of the compute service. As of Juno, this option is '
+ 'deprecated and will be removed in the L release.'),
+ cfg.StrOpt('public_endpoint',
+ help='The base public endpoint URL for Keystone that is '
+ 'advertised to clients (NOTE: this does NOT affect '
+ 'how Keystone listens for connections). '
+ 'Defaults to the base host URL of the request. E.g. a '
+ 'request to http://server:5000/v3/users will '
+ 'default to http://server:5000. You should only need '
+ 'to set this value if the base URL contains a path '
+ '(e.g. /prefix/v3) or the endpoint should be found '
+ 'on a different server.'),
+ cfg.StrOpt('admin_endpoint',
+ help='The base admin endpoint URL for Keystone that is '
+ 'advertised to clients (NOTE: this does NOT affect '
+ 'how Keystone listens for connections). '
+ 'Defaults to the base host URL of the request. E.g. a '
+ 'request to http://server:35357/v3/users will '
+ 'default to http://server:35357. You should only need '
+ 'to set this value if the base URL contains a path '
+ '(e.g. /prefix/v3) or the endpoint should be found '
+ 'on a different server.'),
+ cfg.IntOpt('max_project_tree_depth', default=5,
+ help='Maximum depth of the project hierarchy. WARNING: '
+ 'setting it to a large value may adversely impact '
+ 'performance.'),
+ cfg.IntOpt('max_param_size', default=64,
+ help='Limit the sizes of user & project ID/names.'),
+ # we allow tokens to be a bit larger to accommodate PKI
+ cfg.IntOpt('max_token_size', default=8192,
+ help='Similar to max_param_size, but provides an '
+ 'exception for token values.'),
+ cfg.StrOpt('member_role_id',
+ default='9fe2ff9ee4384b1894a90878d3e92bab',
+ help='Similar to the member_role_name option, this '
+ 'represents the default role ID used to associate '
+ 'users with their default projects in the v2 API. '
+ 'This will be used as the explicit role where one is '
+ 'not specified by the v2 API.'),
+ cfg.StrOpt('member_role_name', default='_member_',
+ help='This is the role name used in combination with the '
+ 'member_role_id option; see that option for more '
+ 'detail.'),
+ cfg.IntOpt('crypt_strength', default=40000,
+ help='The value passed as the keyword "rounds" to '
+ 'passlib\'s encrypt method.'),
+ cfg.IntOpt('list_limit',
+ help='The maximum number of entities that will be '
+ 'returned in a collection, with no limit set by '
+ 'default. This global limit may be then overridden '
+ 'for a specific driver, by specifying a list_limit '
+ 'in the appropriate section (e.g. [assignment]).'),
+ cfg.BoolOpt('domain_id_immutable', default=True,
+ help='Set this to false if you want to enable the '
+ 'ability for user, group and project entities '
+ 'to be moved between domains by updating their '
+ 'domain_id. Allowing such movement is not '
+ 'recommended if the scope of a domain admin is being '
+ 'restricted by use of an appropriate policy file '
+ '(see policy.v3cloudsample as an example).'),
+ cfg.BoolOpt('strict_password_check', default=False,
+ help='If set to true, strict password length checking is '
+ 'performed for password manipulation. If a password '
+ 'exceeds the maximum length, the operation will fail '
+ 'with an HTTP 403 Forbidden error. If set to false, '
+ 'passwords are automatically truncated to the '
+ 'maximum length.'),
+ cfg.StrOpt('secure_proxy_ssl_header',
+ help='The HTTP header used to determine the scheme for the '
+ 'original request, even if it was removed by an SSL '
+ 'terminating proxy. Typical value is '
+ '"HTTP_X_FORWARDED_PROTO".'),
+ ],
+ 'identity': [
+ cfg.StrOpt('default_domain_id', default='default',
+ help='This references the domain to use for all '
+ 'Identity API v2 requests (which are not aware of '
+ 'domains). A domain with this ID will be created '
+ 'for you by keystone-manage db_sync in migration '
+ '008. The domain referenced by this ID cannot be '
+ 'deleted on the v3 API, to prevent accidentally '
+ 'breaking the v2 API. There is nothing special about '
+ 'this domain, other than the fact that it must '
+ 'exist in order to maintain support for your v2 '
+ 'clients.'),
+ cfg.BoolOpt('domain_specific_drivers_enabled',
+ default=False,
+ help='A subset (or all) of domains can have their own '
+ 'identity driver, each with their own partial '
+ 'configuration options, stored in either the '
+ 'resource backend or in a file in a domain '
+ 'configuration directory (depending on the setting '
+ 'of domain_configurations_from_database). Only '
+ 'values specific to the domain need to be specified '
+ 'in this manner. This feature is disabled by '
+ 'default; set to true to enable.'),
+ cfg.BoolOpt('domain_configurations_from_database',
+ default=False,
+ help='Extract the domain specific configuration options '
+ 'from the resource backend where they have been '
+ 'stored with the domain data. This feature is '
+ 'disabled by default (in which case the domain '
+ 'specific options will be loaded from files in the '
+ 'domain configuration directory); set to true to '
+ 'enable.'),
+ cfg.StrOpt('domain_config_dir',
+ default='/etc/keystone/domains',
+ help='Path for Keystone to locate the domain specific '
+ 'identity configuration files if '
+ 'domain_specific_drivers_enabled is set to true.'),
+ cfg.StrOpt('driver',
+ default=('keystone.identity.backends'
+ '.sql.Identity'),
+ help='Identity backend driver.'),
+ cfg.BoolOpt('caching', default=True,
+ help='Toggle for identity caching. This has no '
+ 'effect unless global caching is enabled.'),
+ cfg.IntOpt('cache_time', default=600,
+ help='Time to cache identity data (in seconds). This has '
+ 'no effect unless global and identity caching are '
+ 'enabled.'),
+ cfg.IntOpt('max_password_length', default=4096,
+ help='Maximum supported length for user passwords; '
+ 'decrease to improve performance.'),
+ cfg.IntOpt('list_limit',
+ help='Maximum number of entities that will be returned in '
+ 'an identity collection.'),
+ ],
+ 'identity_mapping': [
+ cfg.StrOpt('driver',
+ default=('keystone.identity.mapping_backends'
+ '.sql.Mapping'),
+ help='Keystone Identity Mapping backend driver.'),
+ cfg.StrOpt('generator',
+ default=('keystone.identity.id_generators'
+ '.sha256.Generator'),
+ help='Public ID generator for user and group entities. '
+ 'The Keystone identity mapper only supports '
+ 'generators that produce no more than 64 characters.'),
+ cfg.BoolOpt('backward_compatible_ids',
+ default=True,
+ help='The format of user and group IDs changed '
+ 'in Juno for backends that do not generate UUIDs '
+ '(e.g. LDAP), with keystone providing a hash mapping '
+ 'to the underlying attribute in LDAP. By default '
+ 'this mapping is disabled, which ensures that '
+ 'existing IDs will not change. Even when the '
+ 'mapping is enabled by using domain specific '
+ 'drivers, any users and groups from the default '
+ 'domain being handled by LDAP will still not be '
+ 'mapped to ensure their IDs remain backward '
+ 'compatible. Setting this value to False will '
+ 'enable the mapping for even the default LDAP '
+ 'driver. It is only safe to do this if you do not '
+ 'already have assignments for users and '
+ 'groups from the default LDAP domain, and it is '
+ 'acceptable for Keystone to provide the different '
+ 'IDs to clients than it did previously. Typically '
+ 'this means that the only time you can set this '
+ 'value to False is when configuring a fresh '
+ 'installation.'),
+ ],
+ 'trust': [
+ cfg.BoolOpt('enabled', default=True,
+ help='Delegation and impersonation features can be '
+ 'optionally disabled.'),
+ cfg.BoolOpt('allow_redelegation', default=False,
+ help='Enable redelegation feature.'),
+ cfg.IntOpt('max_redelegation_count', default=3,
+ help='Maximum depth of trust redelegation.'),
+ cfg.StrOpt('driver',
+ default='keystone.trust.backends.sql.Trust',
+ help='Trust backend driver.')],
+ 'os_inherit': [
+ cfg.BoolOpt('enabled', default=False,
+ help='Role-assignment inheritance to projects from '
+ 'owning domain or from projects higher in the '
+ 'hierarchy can be optionally enabled.'),
+ ],
+ 'fernet_tokens': [
+ cfg.StrOpt('key_repository',
+ default='/etc/keystone/fernet-keys/',
+ help='Directory containing Fernet token keys.'),
+ cfg.IntOpt('max_active_keys',
+ default=3,
+ help='This controls how many keys are held in rotation by '
+ 'keystone-manage fernet_rotate before they are '
+ 'discarded. The default value of 3 means that '
+ 'keystone will maintain one staged key, one primary '
+ 'key, and one secondary key. Increasing this value '
+ 'means that additional secondary keys will be kept in '
+ 'the rotation.'),
+ ],
+ 'token': [
+ cfg.ListOpt('bind', default=[],
+ help='External auth mechanisms that should add bind '
+ 'information to token, e.g., kerberos,x509.'),
+ cfg.StrOpt('enforce_token_bind', default='permissive',
+ help='Enforcement policy on tokens presented to Keystone '
+ 'with bind information. One of disabled, permissive, '
+ 'strict, required or a specifically required bind '
+ 'mode, e.g., kerberos or x509 to require binding to '
+ 'that authentication.'),
+ cfg.IntOpt('expiration', default=3600,
+ help='Amount of time a token should remain valid '
+ '(in seconds).'),
+ cfg.StrOpt('provider',
+ default='keystone.token.providers.uuid.Provider',
+ help='Controls the token construction, validation, and '
+ 'revocation operations. Core providers are '
+ '"keystone.token.providers.[fernet|pkiz|pki|uuid].'
+ 'Provider".'),
+ cfg.StrOpt('driver',
+ default='keystone.token.persistence.backends.sql.Token',
+ help='Token persistence backend driver.'),
+ cfg.BoolOpt('caching', default=True,
+ help='Toggle for token system caching. This has no '
+ 'effect unless global caching is enabled.'),
+ cfg.IntOpt('cache_time',
+ help='Time to cache tokens (in seconds). This has no '
+ 'effect unless global and token caching are '
+ 'enabled.'),
+ cfg.BoolOpt('revoke_by_id', default=True,
+ help='Revoke token by token identifier. Setting '
+ 'revoke_by_id to true enables various forms of '
+ 'enumerating tokens, e.g. `list tokens for user`. '
+ 'These enumerations are processed to determine the '
+ 'list of tokens to revoke. Only disable if you are '
+ 'switching to using the Revoke extension with a '
+ 'backend other than KVS, which stores events in memory.'),
+ cfg.BoolOpt('allow_rescope_scoped_token', default=True,
+ help='Allow rescoping of scoped token. Setting '
+ 'allow_rescoped_scoped_token to false prevents a user '
+ 'from exchanging a scoped token for any other token.'),
+ cfg.StrOpt('hash_algorithm', default='md5',
+ help="The hash algorithm to use for PKI tokens. This can "
+ "be set to any algorithm that hashlib supports. "
+ "WARNING: Before changing this value, the auth_token "
+ "middleware must be configured with the "
+ "hash_algorithms, otherwise token revocation will "
+ "not be processed correctly."),
+ ],
+ 'revoke': [
+ cfg.StrOpt('driver',
+ default='keystone.contrib.revoke.backends.sql.Revoke',
+ help='An implementation of the backend for persisting '
+ 'revocation events.'),
+ cfg.IntOpt('expiration_buffer', default=1800,
+ help='This value (calculated in seconds) is added to token '
+ 'expiration before a revocation event may be removed '
+ 'from the backend.'),
+ cfg.BoolOpt('caching', default=True,
+ help='Toggle for revocation event caching. This has no '
+ 'effect unless global caching is enabled.'),
+ cfg.IntOpt('cache_time', default=3600,
+ help='Time to cache the revocation list and the revocation '
+ 'events (in seconds). This has no effect unless '
+ 'global and token caching are enabled.',
+ deprecated_opts=[cfg.DeprecatedOpt(
+ 'revocation_cache_time', group='token')]),
+ ],
+ 'cache': [
+ cfg.StrOpt('config_prefix', default='cache.keystone',
+ help='Prefix for building the configuration dictionary '
+ 'for the cache region. This should not need to be '
+ 'changed unless there is another dogpile.cache '
+ 'region with the same configuration name.'),
+ cfg.IntOpt('expiration_time', default=600,
+ help='Default TTL, in seconds, for any cached item in '
+ 'the dogpile.cache region. This applies to any '
+ 'cached method that doesn\'t have an explicit '
+ 'cache expiration time defined for it.'),
+ # NOTE(morganfainberg): the dogpile.cache.memory backend is acceptable
+ # in devstack and other such single-process/thread deployments. Running
+ # dogpile.cache.memory in any other configuration has the same pitfalls
+ # as the KVS token backend. It is recommended that either Redis or
+ # Memcached are used as the dogpile backend for real workloads. To
+ # prevent issues with the memory cache ending up in "production"
+ # unintentionally, we register a no-op as the keystone default caching
+ # backend.
+ cfg.StrOpt('backend', default='keystone.common.cache.noop',
+ help='Dogpile.cache backend module. It is recommended '
+ 'that Memcache with pooling '
+ '(keystone.cache.memcache_pool) or Redis '
+ '(dogpile.cache.redis) be used in production '
+ 'deployments. Small workloads (single process) '
+ 'like devstack can use the dogpile.cache.memory '
+ 'backend.'),
+ cfg.MultiStrOpt('backend_argument', default=[],
+ help='Arguments supplied to the backend module. '
+ 'Specify this option once per argument to be '
+ 'passed to the dogpile.cache backend. Example '
+ 'format: "<argname>:<value>".'),
+ cfg.ListOpt('proxies', default=[],
+ help='Proxy classes to import that will affect the way '
+ 'the dogpile.cache backend functions. See the '
+ 'dogpile.cache documentation on '
+ 'changing-backend-behavior.'),
+ cfg.BoolOpt('enabled', default=False,
+ help='Global toggle for all caching using the '
+ 'should_cache_fn mechanism.'),
+ cfg.BoolOpt('debug_cache_backend', default=False,
+ help='Extra debugging from the cache backend (cache '
+ 'keys, get/set/delete/etc calls). This is only '
+ 'really useful if you need to see the specific '
+ 'cache-backend get/set/delete calls with the '
+ 'keys/values. Typically this should be left set '
+ 'to false.'),
+ cfg.ListOpt('memcache_servers', default=['localhost:11211'],
+ help='Memcache servers in the format of "host:port".'
+ ' (dogpile.cache.memcache and keystone.cache.memcache_pool'
+ ' backends only).'),
+ cfg.IntOpt('memcache_dead_retry',
+ default=5 * 60,
+ help='Number of seconds memcached server is considered dead'
+ ' before it is tried again. (dogpile.cache.memcache and'
+ ' keystone.cache.memcache_pool backends only).'),
+ cfg.IntOpt('memcache_socket_timeout',
+ default=3,
+ help='Timeout in seconds for every call to a server.'
+ ' (dogpile.cache.memcache and keystone.cache.memcache_pool'
+ ' backends only).'),
+ cfg.IntOpt('memcache_pool_maxsize',
+ default=10,
+ help='Max total number of open connections to every'
+ ' memcached server. (keystone.cache.memcache_pool backend'
+ ' only).'),
+ cfg.IntOpt('memcache_pool_unused_timeout',
+ default=60,
+ help='Number of seconds a connection to memcached is held'
+ ' unused in the pool before it is closed.'
+ ' (keystone.cache.memcache_pool backend only).'),
+ cfg.IntOpt('memcache_pool_connection_get_timeout',
+ default=10,
+ help='Number of seconds that an operation will wait to get '
+ 'a memcache client connection.'),
+ ],
+ 'ssl': [
+ cfg.StrOpt('ca_key',
+ default='/etc/keystone/ssl/private/cakey.pem',
+ help='Path of the CA key file for SSL.'),
+ cfg.IntOpt('key_size', default=1024,
+ help='SSL key length (in bits) (auto generated '
+ 'certificate).'),
+ cfg.IntOpt('valid_days', default=3650,
+ help='Days the certificate is valid for once signed '
+ '(auto generated certificate).'),
+ cfg.StrOpt('cert_subject',
+ default='/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost',
+ help='SSL certificate subject (auto generated '
+ 'certificate).'),
+ ],
+ 'signing': [
+ cfg.StrOpt('certfile',
+ default=_CERTFILE,
+ help='Path of the certfile for token signing. For '
+ 'non-production environments, you may be interested '
+ 'in using `keystone-manage pki_setup` to generate '
+ 'self-signed certificates.'),
+ cfg.StrOpt('keyfile',
+ default=_KEYFILE,
+ help='Path of the keyfile for token signing.'),
+ cfg.StrOpt('ca_certs',
+ default='/etc/keystone/ssl/certs/ca.pem',
+ help='Path of the CA for token signing.'),
+ cfg.StrOpt('ca_key',
+ default='/etc/keystone/ssl/private/cakey.pem',
+ help='Path of the CA key for token signing.'),
+ cfg.IntOpt('key_size', default=2048,
+ help='Key size (in bits) for token signing cert '
+ '(auto generated certificate).'),
+ cfg.IntOpt('valid_days', default=3650,
+ help='Days the token signing cert is valid for '
+ '(auto generated certificate).'),
+ cfg.StrOpt('cert_subject',
+ default=('/C=US/ST=Unset/L=Unset/O=Unset/'
+ 'CN=www.example.com'),
+ help='Certificate subject (auto generated certificate) for '
+ 'token signing.'),
+ ],
+ 'assignment': [
+ # assignment has no default for backward compatibility reasons.
+ # If assignment driver is not specified, the identity driver chooses
+ # the backend
+ cfg.StrOpt('driver',
+ help='Assignment backend driver.'),
+ ],
+ 'resource': [
+ cfg.StrOpt('driver',
+ help='Resource backend driver. If a resource driver is '
+ 'not specified, the assignment driver will choose '
+ 'the resource driver.'),
+ cfg.BoolOpt('caching', default=True,
+ deprecated_opts=[cfg.DeprecatedOpt('caching',
+ group='assignment')],
+ help='Toggle for resource caching. This has no effect '
+ 'unless global caching is enabled.'),
+ cfg.IntOpt('cache_time',
+ deprecated_opts=[cfg.DeprecatedOpt('cache_time',
+ group='assignment')],
+ help='TTL (in seconds) to cache resource data. This has '
+ 'no effect unless global caching is enabled.'),
+ cfg.IntOpt('list_limit',
+ deprecated_opts=[cfg.DeprecatedOpt('list_limit',
+ group='assignment')],
+ help='Maximum number of entities that will be returned '
+ 'in a resource collection.'),
+ ],
+ 'domain_config': [
+ cfg.StrOpt('driver',
+ default='keystone.resource.config_backends.sql.'
+ 'DomainConfig',
+ help='Domain config backend driver.'),
+ ],
+ 'role': [
+ # The role driver has no default for backward compatibility reasons.
+ # If role driver is not specified, the assignment driver chooses
+ # the backend
+ cfg.StrOpt('driver',
+ help='Role backend driver.'),
+ cfg.BoolOpt('caching', default=True,
+ help='Toggle for role caching. This has no effect '
+ 'unless global caching is enabled.'),
+ cfg.IntOpt('cache_time',
+ help='TTL (in seconds) to cache role data. This has '
+ 'no effect unless global caching is enabled.'),
+ cfg.IntOpt('list_limit',
+ help='Maximum number of entities that will be returned '
+ 'in a role collection.'),
+ ],
+ 'credential': [
+ cfg.StrOpt('driver',
+ default=('keystone.credential.backends'
+ '.sql.Credential'),
+ help='Credential backend driver.'),
+ ],
+ 'oauth1': [
+ cfg.StrOpt('driver',
+ default='keystone.contrib.oauth1.backends.sql.OAuth1',
+ help='OAuth backend driver.'),
+ cfg.IntOpt('request_token_duration', default=28800,
+ help='Duration (in seconds) for the OAuth Request Token.'),
+ cfg.IntOpt('access_token_duration', default=86400,
+ help='Duration (in seconds) for the OAuth Access Token.'),
+ ],
+ 'federation': [
+ cfg.StrOpt('driver',
+ default='keystone.contrib.federation.'
+ 'backends.sql.Federation',
+ help='Federation backend driver.'),
+ cfg.StrOpt('assertion_prefix', default='',
+ help='Value to be used when filtering assertion parameters '
+ 'from the environment.'),
+ cfg.StrOpt('remote_id_attribute',
+ help='Value to be used to obtain the entity ID of the '
+ 'Identity Provider from the environment (e.g. if '
+ 'using the mod_shib plugin this value is '
+ '`Shib-Identity-Provider`).'),
+ cfg.StrOpt('federated_domain_name', default='Federated',
+ help='A domain name that is reserved to allow federated '
+ 'ephemeral users to have a domain concept. Note that '
+ 'an admin will not be able to create a domain with '
+ 'this name or update an existing domain to this '
+ 'name. Changing this value is not advised unless '
+ 'you really have to. Setting this option to an '
+ 'empty string or None has no effect; the '
+ 'default name will be used.'),
+ cfg.MultiStrOpt('trusted_dashboard', default=[],
+ help='A list of trusted dashboard hosts. Before '
+ 'accepting a Single Sign-On request to return a '
+ 'token, the origin host must be a member of the '
+ 'trusted_dashboard list. This configuration '
+ 'option may be repeated for multiple values. '
+ 'For example: trusted_dashboard=http://acme.com '
+ 'trusted_dashboard=http://beta.com'),
+ cfg.StrOpt('sso_callback_template', default=_SSO_CALLBACK,
+ help='Location of Single Sign-On callback handler, will '
+ 'return a token to a trusted dashboard host.'),
+ ],
+ 'policy': [
+ cfg.StrOpt('driver',
+ default='keystone.policy.backends.sql.Policy',
+ help='Policy backend driver.'),
+ cfg.IntOpt('list_limit',
+ help='Maximum number of entities that will be returned '
+ 'in a policy collection.'),
+ ],
+ 'endpoint_filter': [
+ cfg.StrOpt('driver',
+ default='keystone.contrib.endpoint_filter.backends'
+ '.sql.EndpointFilter',
+ help='Endpoint Filter backend driver.'),
+ cfg.BoolOpt('return_all_endpoints_if_no_filter', default=True,
+ help='Toggle to return all active endpoints if no filter '
+ 'exists.'),
+ ],
+ 'endpoint_policy': [
+ cfg.StrOpt('driver',
+ default='keystone.contrib.endpoint_policy.backends'
+ '.sql.EndpointPolicy',
+ help='Endpoint policy backend driver.'),
+ ],
+ 'ldap': [
+ cfg.StrOpt('url', default='ldap://localhost',
+ help='URL for connecting to the LDAP server.'),
+ cfg.StrOpt('user',
+ help='User BindDN to query the LDAP server.'),
+ cfg.StrOpt('password', secret=True,
+ help='Password for the BindDN to query the LDAP server.'),
+ cfg.StrOpt('suffix', default='cn=example,cn=com',
+ help='LDAP server suffix.'),
+ cfg.BoolOpt('use_dumb_member', default=False,
+ help='If true, will add a dummy member to groups. This is '
+ 'required if the objectclass for groups requires the '
+ '"member" attribute.'),
+ cfg.StrOpt('dumb_member', default='cn=dumb,dc=nonexistent',
+ help='DN of the "dummy member" to use when '
+ '"use_dumb_member" is enabled.'),
+ cfg.BoolOpt('allow_subtree_delete', default=False,
+ help='Delete subtrees using the subtree delete control. '
+ 'Only enable this option if your LDAP server '
+ 'supports subtree deletion.'),
+ cfg.StrOpt('query_scope', default='one',
+ help='The LDAP scope for queries. This can be either '
+ '"one" (onelevel/singleLevel) or "sub" '
+ '(subtree/wholeSubtree).'),
+ cfg.IntOpt('page_size', default=0,
+ help='Maximum results per page; a value of zero ("0") '
+ 'disables paging.'),
+ cfg.StrOpt('alias_dereferencing', default='default',
+ help='The LDAP dereferencing option for queries. This '
+ 'can be either "never", "searching", "always", '
+ '"finding" or "default". The "default" option falls '
+ 'back to using default dereferencing configured by '
+ 'your ldap.conf.'),
+ cfg.IntOpt('debug_level',
+ help='Sets the LDAP debugging level for LDAP calls. '
+ 'A value of 0 means that debugging is not enabled. '
+ 'This value is a bitmask, consult your LDAP '
+ 'documentation for possible values.'),
+ cfg.BoolOpt('chase_referrals',
+ help='Override the system\'s default referral chasing '
+ 'behavior for queries.'),
+ cfg.StrOpt('user_tree_dn',
+ help='Search base for users.'),
+ cfg.StrOpt('user_filter',
+ help='LDAP search filter for users.'),
+ cfg.StrOpt('user_objectclass', default='inetOrgPerson',
+ help='LDAP objectclass for users.'),
+ cfg.StrOpt('user_id_attribute', default='cn',
+ help='LDAP attribute mapped to user id. '
+ 'WARNING: must not be a multivalued attribute.'),
+ cfg.StrOpt('user_name_attribute', default='sn',
+ help='LDAP attribute mapped to user name.'),
+ cfg.StrOpt('user_mail_attribute', default='mail',
+ help='LDAP attribute mapped to user email.'),
+ cfg.StrOpt('user_pass_attribute', default='userPassword',
+ help='LDAP attribute mapped to password.'),
+ cfg.StrOpt('user_enabled_attribute', default='enabled',
+ help='LDAP attribute mapped to user enabled flag.'),
+ cfg.BoolOpt('user_enabled_invert', default=False,
+ help='Invert the meaning of the boolean enabled values. '
+ 'Some LDAP servers use a boolean lock attribute '
+ 'where "true" means an account is disabled. Setting '
+ '"user_enabled_invert = true" will allow these lock '
+ 'attributes to be used. This setting will have no '
+ 'effect if "user_enabled_mask" or '
+ '"user_enabled_emulation" settings are in use.'),
+ cfg.IntOpt('user_enabled_mask', default=0,
+ help='Bitmask integer to indicate the bit that the enabled '
+ 'value is stored in if the LDAP server represents '
+ '"enabled" as a bit on an integer rather than a '
+ 'boolean. A value of "0" indicates the mask is not '
+ 'used. If this is not set to "0" the typical value '
+ 'is "2". This is typically used when '
+ '"user_enabled_attribute = userAccountControl".'),
+ cfg.StrOpt('user_enabled_default', default='True',
+ help='Default value to enable users. This should match an '
+ 'appropriate int value if the LDAP server uses '
+ 'non-boolean (bitmask) values to indicate if a user '
+ 'is enabled or disabled. If this is not set to "True" '
+ 'the typical value is "512". This is typically used '
+ 'when "user_enabled_attribute = userAccountControl".'),
+ cfg.ListOpt('user_attribute_ignore',
+ default=['default_project_id', 'tenants'],
+ help='List of attributes stripped off the user on '
+ 'update.'),
+ cfg.StrOpt('user_default_project_id_attribute',
+ help='LDAP attribute mapped to default_project_id for '
+ 'users.'),
+ cfg.BoolOpt('user_allow_create', default=True,
+ help='Allow user creation in LDAP backend.'),
+ cfg.BoolOpt('user_allow_update', default=True,
+ help='Allow user updates in LDAP backend.'),
+ cfg.BoolOpt('user_allow_delete', default=True,
+ help='Allow user deletion in LDAP backend.'),
+ cfg.BoolOpt('user_enabled_emulation', default=False,
+ help='If true, Keystone uses an alternative method to '
+ 'determine if a user is enabled or not by checking '
+ 'if they are a member of the '
+ '"user_enabled_emulation_dn" group.'),
+ cfg.StrOpt('user_enabled_emulation_dn',
+ help='DN of the group entry to hold enabled users when '
+ 'using enabled emulation.'),
+ cfg.ListOpt('user_additional_attribute_mapping',
+ default=[],
+ help='List of additional attribute mappings for '
+ 'users. Attribute '
+ 'mapping format is <ldap_attr>:<user_attr>, where '
+ 'ldap_attr is the attribute in the LDAP entry and '
+ 'user_attr is the Identity API attribute.'),
+
+ cfg.StrOpt('project_tree_dn',
+ deprecated_opts=[cfg.DeprecatedOpt(
+ 'tenant_tree_dn', group='ldap')],
+ help='Search base for projects.'),
+ cfg.StrOpt('project_filter',
+ deprecated_opts=[cfg.DeprecatedOpt(
+ 'tenant_filter', group='ldap')],
+ help='LDAP search filter for projects.'),
+ cfg.StrOpt('project_objectclass', default='groupOfNames',
+ deprecated_opts=[cfg.DeprecatedOpt(
+ 'tenant_objectclass', group='ldap')],
+ help='LDAP objectclass for projects.'),
+ cfg.StrOpt('project_id_attribute', default='cn',
+ deprecated_opts=[cfg.DeprecatedOpt(
+ 'tenant_id_attribute', group='ldap')],
+ help='LDAP attribute mapped to project id.'),
+ cfg.StrOpt('project_member_attribute', default='member',
+ deprecated_opts=[cfg.DeprecatedOpt(
+ 'tenant_member_attribute', group='ldap')],
+ help='LDAP attribute mapped to project membership for '
+ 'user.'),
+ cfg.StrOpt('project_name_attribute', default='ou',
+ deprecated_opts=[cfg.DeprecatedOpt(
+ 'tenant_name_attribute', group='ldap')],
+ help='LDAP attribute mapped to project name.'),
+ cfg.StrOpt('project_desc_attribute', default='description',
+ deprecated_opts=[cfg.DeprecatedOpt(
+ 'tenant_desc_attribute', group='ldap')],
+ help='LDAP attribute mapped to project description.'),
+ cfg.StrOpt('project_enabled_attribute', default='enabled',
+ deprecated_opts=[cfg.DeprecatedOpt(
+ 'tenant_enabled_attribute', group='ldap')],
+ help='LDAP attribute mapped to project enabled.'),
+ cfg.StrOpt('project_domain_id_attribute',
+ deprecated_opts=[cfg.DeprecatedOpt(
+ 'tenant_domain_id_attribute', group='ldap')],
+ default='businessCategory',
+ help='LDAP attribute mapped to project domain_id.'),
+ cfg.ListOpt('project_attribute_ignore', default=[],
+ deprecated_opts=[cfg.DeprecatedOpt(
+ 'tenant_attribute_ignore', group='ldap')],
+ help='List of attributes stripped off the project on '
+ 'update.'),
+ cfg.BoolOpt('project_allow_create', default=True,
+ deprecated_opts=[cfg.DeprecatedOpt(
+ 'tenant_allow_create', group='ldap')],
+ help='Allow project creation in LDAP backend.'),
+ cfg.BoolOpt('project_allow_update', default=True,
+ deprecated_opts=[cfg.DeprecatedOpt(
+ 'tenant_allow_update', group='ldap')],
+ help='Allow project update in LDAP backend.'),
+ cfg.BoolOpt('project_allow_delete', default=True,
+ deprecated_opts=[cfg.DeprecatedOpt(
+ 'tenant_allow_delete', group='ldap')],
+ help='Allow project deletion in LDAP backend.'),
+ cfg.BoolOpt('project_enabled_emulation', default=False,
+ deprecated_opts=[cfg.DeprecatedOpt(
+ 'tenant_enabled_emulation', group='ldap')],
+ help='If true, Keystone uses an alternative method to '
+ 'determine if a project is enabled or not by '
+ 'checking if it is a member of the '
+ '"project_enabled_emulation_dn" group.'),
+ cfg.StrOpt('project_enabled_emulation_dn',
+ deprecated_opts=[cfg.DeprecatedOpt(
+ 'tenant_enabled_emulation_dn', group='ldap')],
+ help='DN of the group entry to hold enabled projects when '
+ 'using enabled emulation.'),
+ cfg.ListOpt('project_additional_attribute_mapping',
+ deprecated_opts=[cfg.DeprecatedOpt(
+ 'tenant_additional_attribute_mapping', group='ldap')],
+ default=[],
+ help='Additional attribute mappings for projects. '
+ 'Attribute mapping format is '
+ '<ldap_attr>:<user_attr>, where ldap_attr is the '
+ 'attribute in the LDAP entry and user_attr is the '
+ 'Identity API attribute.'),
+
+ cfg.StrOpt('role_tree_dn',
+ help='Search base for roles.'),
+ cfg.StrOpt('role_filter',
+ help='LDAP search filter for roles.'),
+ cfg.StrOpt('role_objectclass', default='organizationalRole',
+ help='LDAP objectclass for roles.'),
+ cfg.StrOpt('role_id_attribute', default='cn',
+ help='LDAP attribute mapped to role id.'),
+ cfg.StrOpt('role_name_attribute', default='ou',
+ help='LDAP attribute mapped to role name.'),
+ cfg.StrOpt('role_member_attribute', default='roleOccupant',
+ help='LDAP attribute mapped to role membership.'),
+ cfg.ListOpt('role_attribute_ignore', default=[],
+ help='List of attributes stripped off the role on '
+ 'update.'),
+ cfg.BoolOpt('role_allow_create', default=True,
+ help='Allow role creation in LDAP backend.'),
+ cfg.BoolOpt('role_allow_update', default=True,
+ help='Allow role update in LDAP backend.'),
+ cfg.BoolOpt('role_allow_delete', default=True,
+ help='Allow role deletion in LDAP backend.'),
+ cfg.ListOpt('role_additional_attribute_mapping',
+ default=[],
+ help='Additional attribute mappings for roles. Attribute '
+ 'mapping format is <ldap_attr>:<user_attr>, where '
+ 'ldap_attr is the attribute in the LDAP entry and '
+ 'user_attr is the Identity API attribute.'),
+
+ cfg.StrOpt('group_tree_dn',
+ help='Search base for groups.'),
+ cfg.StrOpt('group_filter',
+ help='LDAP search filter for groups.'),
+ cfg.StrOpt('group_objectclass', default='groupOfNames',
+ help='LDAP objectclass for groups.'),
+ cfg.StrOpt('group_id_attribute', default='cn',
+ help='LDAP attribute mapped to group id.'),
+ cfg.StrOpt('group_name_attribute', default='ou',
+ help='LDAP attribute mapped to group name.'),
+ cfg.StrOpt('group_member_attribute', default='member',
+ help='LDAP attribute mapped to show group membership.'),
+ cfg.StrOpt('group_desc_attribute', default='description',
+ help='LDAP attribute mapped to group description.'),
+ cfg.ListOpt('group_attribute_ignore', default=[],
+ help='List of attributes stripped off the group on '
+ 'update.'),
+ cfg.BoolOpt('group_allow_create', default=True,
+ help='Allow group creation in LDAP backend.'),
+ cfg.BoolOpt('group_allow_update', default=True,
+ help='Allow group update in LDAP backend.'),
+ cfg.BoolOpt('group_allow_delete', default=True,
+ help='Allow group deletion in LDAP backend.'),
+ cfg.ListOpt('group_additional_attribute_mapping',
+ default=[],
+ help='Additional attribute mappings for groups. Attribute '
+ 'mapping format is <ldap_attr>:<user_attr>, where '
+ 'ldap_attr is the attribute in the LDAP entry and '
+ 'user_attr is the Identity API attribute.'),
+
+ cfg.StrOpt('tls_cacertfile',
+ help='CA certificate file path for communicating with '
+ 'LDAP servers.'),
+ cfg.StrOpt('tls_cacertdir',
+ help='CA certificate directory path for communicating with '
+ 'LDAP servers.'),
+ cfg.BoolOpt('use_tls', default=False,
+ help='Enable TLS for communicating with LDAP servers.'),
+ cfg.StrOpt('tls_req_cert', default='demand',
+ help='Valid options for tls_req_cert are demand, never, '
+ 'and allow.'),
+ cfg.BoolOpt('use_pool', default=False,
+ help='Enable LDAP connection pooling.'),
+ cfg.IntOpt('pool_size', default=10,
+ help='Connection pool size.'),
+ cfg.IntOpt('pool_retry_max', default=3,
+ help='Maximum count of reconnect trials.'),
+ cfg.FloatOpt('pool_retry_delay', default=0.1,
+ help='Time span in seconds to wait between two '
+ 'reconnect trials.'),
+ cfg.IntOpt('pool_connection_timeout', default=-1,
+ help='Connector timeout in seconds. Value -1 indicates '
+ 'indefinite wait for response.'),
+ cfg.IntOpt('pool_connection_lifetime', default=600,
+ help='Connection lifetime in seconds.'),
+ cfg.BoolOpt('use_auth_pool', default=False,
+ help='Enable LDAP connection pooling for end user '
+ 'authentication. If use_pool is disabled, then this '
+ 'setting is meaningless and is not used at all.'),
+ cfg.IntOpt('auth_pool_size', default=100,
+ help='End user auth connection pool size.'),
+ cfg.IntOpt('auth_pool_connection_lifetime', default=60,
+ help='End user auth connection lifetime in seconds.'),
+ ],
+ 'auth': [
+ cfg.ListOpt('methods', default=_DEFAULT_AUTH_METHODS,
+ help='Default auth methods.'),
+ cfg.StrOpt('password',
+ default='keystone.auth.plugins.password.Password',
+ help='The password auth plugin module.'),
+ cfg.StrOpt('token',
+ default='keystone.auth.plugins.token.Token',
+ help='The token auth plugin module.'),
+ # deals with REMOTE_USER authentication
+ cfg.StrOpt('external',
+ default='keystone.auth.plugins.external.DefaultDomain',
+ help='The external (REMOTE_USER) auth plugin module.'),
+ cfg.StrOpt('oauth1',
+ default='keystone.auth.plugins.oauth1.OAuth',
+ help='The oAuth1.0 auth plugin module.'),
+ ],
+ 'paste_deploy': [
+ cfg.StrOpt('config_file', default='keystone-paste.ini',
+ help='Name of the paste configuration file that defines '
+ 'the available pipelines.'),
+ ],
+ 'memcache': [
+ cfg.ListOpt('servers', default=['localhost:11211'],
+ help='Memcache servers in the format of "host:port".'),
+ cfg.IntOpt('dead_retry',
+ default=5 * 60,
+ help='Number of seconds memcached server is considered dead'
+ ' before it is tried again. This is used by the key '
+ 'value store system (e.g. token '
+ 'pooled memcached persistence backend).'),
+ cfg.IntOpt('socket_timeout',
+ default=3,
+ help='Timeout in seconds for every call to a server. This '
+ 'is used by the key value store system (e.g. token '
+ 'pooled memcached persistence backend).'),
+ cfg.IntOpt('pool_maxsize',
+ default=10,
+ help='Max total number of open connections to every'
+ ' memcached server. This is used by the key value '
+ 'store system (e.g. token pooled memcached '
+ 'persistence backend).'),
+ cfg.IntOpt('pool_unused_timeout',
+ default=60,
+ help='Number of seconds a connection to memcached is held'
+ ' unused in the pool before it is closed. This is used'
+ ' by the key value store system (e.g. token pooled '
+ 'memcached persistence backend).'),
+ cfg.IntOpt('pool_connection_get_timeout',
+ default=10,
+ help='Number of seconds that an operation will wait to get '
+ 'a memcache client connection. This is used by the '
+ 'key value store system (e.g. token pooled memcached '
+ 'persistence backend).'),
+ ],
+ 'catalog': [
+ cfg.StrOpt('template_file',
+ default='default_catalog.templates',
+ help='Catalog template file name for use with the '
+ 'template catalog backend.'),
+ cfg.StrOpt('driver',
+ default='keystone.catalog.backends.sql.Catalog',
+ help='Catalog backend driver.'),
+ cfg.BoolOpt('caching', default=True,
+ help='Toggle for catalog caching. This has no '
+ 'effect unless global caching is enabled.'),
+ cfg.IntOpt('cache_time',
+ help='Time to cache catalog data (in seconds). This has no '
+ 'effect unless global and catalog caching are '
+ 'enabled.'),
+ cfg.IntOpt('list_limit',
+ help='Maximum number of entities that will be returned '
+ 'in a catalog collection.'),
+ ],
+ 'kvs': [
+ cfg.ListOpt('backends', default=[],
+ help='Extra dogpile.cache backend modules to register '
+ 'with the dogpile.cache library.'),
+ cfg.StrOpt('config_prefix', default='keystone.kvs',
+ help='Prefix for building the configuration dictionary '
+ 'for the KVS region. This should not need to be '
+ 'changed unless there is another dogpile.cache '
+ 'region with the same configuration name.'),
+ cfg.BoolOpt('enable_key_mangler', default=True,
+ help='Toggle to disable using a key-mangling function '
+ 'to ensure fixed-length keys. This is toggleable '
+ 'for debugging purposes; it is highly recommended '
+ 'to always leave this set to true.'),
+ cfg.IntOpt('default_lock_timeout', default=5,
+ help='Default lock timeout (in seconds) for distributed '
+ 'locking.'),
+ ],
+ 'saml': [
+ cfg.IntOpt('assertion_expiration_time', default=3600,
+ help='Default TTL, in seconds, for any generated SAML '
+ 'assertion created by Keystone.'),
+ cfg.StrOpt('xmlsec1_binary',
+ default='xmlsec1',
+ help='Binary to be called for XML signing. Install the '
+ 'appropriate package, specify absolute path or adjust '
+ 'your PATH environment variable if the binary cannot '
+ 'be found.'),
+ cfg.StrOpt('certfile',
+ default=_CERTFILE,
+ help='Path of the certfile for SAML signing. For '
+ 'non-production environments, you may be interested '
+ 'in using `keystone-manage pki_setup` to generate '
+ 'self-signed certificates. Note, the path cannot '
+ 'contain a comma.'),
+ cfg.StrOpt('keyfile',
+ default=_KEYFILE,
+ help='Path of the keyfile for SAML signing. Note, the path '
+ 'cannot contain a comma.'),
+ cfg.StrOpt('idp_entity_id',
+ help='Entity ID value for unique Identity Provider '
+ 'identification. Usually the FQDN is used, with a suffix. '
+ 'A value is required to generate IDP Metadata. '
+ 'For example: https://keystone.example.com/v3/'
+ 'OS-FEDERATION/saml2/idp'),
+ cfg.StrOpt('idp_sso_endpoint',
+ help='Identity Provider Single-Sign-On service value, '
+ 'required in the Identity Provider\'s metadata. '
+ 'A value is required to generate IDP Metadata. '
+ 'For example: https://keystone.example.com/v3/'
+ 'OS-FEDERATION/saml2/sso'),
+ cfg.StrOpt('idp_lang', default='en',
+ help='Language used by the organization.'),
+ cfg.StrOpt('idp_organization_name',
+ help='Organization name the installation belongs to.'),
+ cfg.StrOpt('idp_organization_display_name',
+ help='Organization name to be displayed.'),
+ cfg.StrOpt('idp_organization_url',
+ help='URL of the organization.'),
+ cfg.StrOpt('idp_contact_company',
+ help='Company of contact person.'),
+ cfg.StrOpt('idp_contact_name',
+ help='Given name of contact person.'),
+ cfg.StrOpt('idp_contact_surname',
+ help='Surname of contact person.'),
+ cfg.StrOpt('idp_contact_email',
+ help='Email address of contact person.'),
+ cfg.StrOpt('idp_contact_telephone',
+ help='Telephone number of contact person.'),
+ cfg.StrOpt('idp_contact_type', default='other',
+ help='Contact type. Allowed values are: '
+ 'technical, support, administrative, '
+ 'billing, and other.'),
+ cfg.StrOpt('idp_metadata_path',
+ default='/etc/keystone/saml2_idp_metadata.xml',
+ help='Path to the Identity Provider Metadata file. '
+ 'This file should be generated with the '
+ 'keystone-manage saml_idp_metadata command.'),
+ ],
+ 'eventlet_server': [
+ cfg.IntOpt('public_workers',
+ deprecated_name='public_workers',
+ deprecated_group='DEFAULT',
+ help='The number of worker processes to serve the public '
+ 'eventlet application. Defaults to number of CPUs '
+ '(minimum of 2).'),
+ cfg.IntOpt('admin_workers',
+ deprecated_name='admin_workers',
+ deprecated_group='DEFAULT',
+ help='The number of worker processes to serve the admin '
+ 'eventlet application. Defaults to number of CPUs '
+ '(minimum of 2).'),
+ cfg.StrOpt('public_bind_host',
+ default='0.0.0.0',
+ deprecated_opts=[cfg.DeprecatedOpt('bind_host',
+ group='DEFAULT'),
+ cfg.DeprecatedOpt('public_bind_host',
+ group='DEFAULT'), ],
+ help='The IP address of the network interface for the '
+ 'public service to listen on.'),
+ cfg.IntOpt('public_port', default=5000, deprecated_name='public_port',
+ deprecated_group='DEFAULT',
+ help='The port number which the public service listens '
+ 'on.'),
+ cfg.StrOpt('admin_bind_host',
+ default='0.0.0.0',
+ deprecated_opts=[cfg.DeprecatedOpt('bind_host',
+ group='DEFAULT'),
+ cfg.DeprecatedOpt('admin_bind_host',
+ group='DEFAULT')],
+ help='The IP address of the network interface for the '
+ 'admin service to listen on.'),
+ cfg.IntOpt('admin_port', default=35357, deprecated_name='admin_port',
+ deprecated_group='DEFAULT',
+ help='The port number which the admin service listens '
+ 'on.'),
+ cfg.BoolOpt('tcp_keepalive', default=False,
+ deprecated_name='tcp_keepalive',
+ deprecated_group='DEFAULT',
+ help='Set this to true if you want to enable '
+ 'TCP_KEEPALIVE on server sockets, i.e. sockets used '
+ 'by the Keystone wsgi server for client '
+ 'connections.'),
+ cfg.IntOpt('tcp_keepidle',
+ default=600,
+ deprecated_name='tcp_keepidle',
+ deprecated_group='DEFAULT',
+ help='Sets the value of TCP_KEEPIDLE in seconds for each '
+ 'server socket. Only applies if tcp_keepalive is '
+ 'true.'),
+ ],
+ 'eventlet_server_ssl': [
+ cfg.BoolOpt('enable', default=False, deprecated_name='enable',
+ deprecated_group='ssl',
+ help='Toggle for SSL support on the Keystone '
+ 'eventlet servers.'),
+ cfg.StrOpt('certfile',
+ default="/etc/keystone/ssl/certs/keystone.pem",
+ deprecated_name='certfile', deprecated_group='ssl',
+ help='Path of the certfile for SSL. For non-production '
+ 'environments, you may be interested in using '
+ '`keystone-manage ssl_setup` to generate self-signed '
+ 'certificates.'),
+ cfg.StrOpt('keyfile',
+ default='/etc/keystone/ssl/private/keystonekey.pem',
+ deprecated_name='keyfile', deprecated_group='ssl',
+ help='Path of the keyfile for SSL.'),
+ cfg.StrOpt('ca_certs',
+ default='/etc/keystone/ssl/certs/ca.pem',
+ deprecated_name='ca_certs', deprecated_group='ssl',
+ help='Path of the CA cert file for SSL.'),
+ cfg.BoolOpt('cert_required', default=False,
+ deprecated_name='cert_required', deprecated_group='ssl',
+ help='Require client certificate.'),
+ ],
+}
+
+
+CONF = cfg.CONF
+oslo_messaging.set_transport_defaults(control_exchange='keystone')
+
+
+def _register_auth_plugin_opt(conf, option):
+ conf.register_opt(option, group='auth')
+
+
+def setup_authentication(conf=None):
+ # register any non-default auth methods here (used by extensions, etc)
+ if conf is None:
+ conf = CONF
+ for method_name in conf.auth.methods:
+ if method_name not in _DEFAULT_AUTH_METHODS:
+ option = cfg.StrOpt(method_name)
+ _register_auth_plugin_opt(conf, option)
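+
+# Illustrative example with a hypothetical method name (not part of the
+# original module): given a configuration such as
+#   [auth]
+#   methods = external,password,token,oauth1,custom
+#   custom = some.module.path.CustomAuthPlugin
+# setup_authentication() registers a 'custom' StrOpt in the [auth] group so
+# the plugin's module path can be read from the configuration file.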
+
+
+def configure(conf=None):
+ if conf is None:
+ conf = CONF
+
+ conf.register_cli_opt(
+ cfg.BoolOpt('standard-threads', default=False,
+ help='Do not monkey-patch threading system modules.'))
+ conf.register_cli_opt(
+ cfg.StrOpt('pydev-debug-host',
+ help='Host to connect to for remote debugger.'))
+ conf.register_cli_opt(
+ cfg.IntOpt('pydev-debug-port',
+ help='Port to connect to for remote debugger.'))
+
+ for section in FILE_OPTIONS:
+ for option in FILE_OPTIONS[section]:
+ if section:
+ conf.register_opt(option, group=section)
+ else:
+ conf.register_opt(option)
+
+ # register any non-default auth methods here (used by extensions, etc)
+ setup_authentication(conf)
+
+
+def list_opts():
+ """Return a list of oslo_config options available in Keystone.
+
+ The returned list includes all oslo_config options which are registered as
+ the "FILE_OPTIONS" in keystone.common.config. This list will not include
+ the options from the oslo-incubator library or any options registered
+ dynamically at run time.
+
+ Each object in the list is a two element tuple. The first element of
+ each tuple is the name of the group under which the list of options in the
+ second element will be registered. A group name of None corresponds to the
+ [DEFAULT] group in config files.
+
+ This function is also discoverable via the 'oslo_config.opts' entry point
+ under the 'keystone.config.opts' namespace.
+
+ The purpose of this is to allow tools like the Oslo sample config file
+ generator to discover the options exposed to users by this library.
+
+ :returns: a list of (group_name, opts) tuples
+ """
+ return FILE_OPTIONS.items()
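+
+# Illustrative (not part of the original module): list_opts() returns pairs
+# such as (None, [<DEFAULT options>]) and ('identity', [<identity options>]),
+# which tools like the oslo sample-config generator consume through the
+# 'keystone.config.opts' entry point mentioned in the docstring above.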
diff --git a/keystone-moon/keystone/common/controller.py b/keystone-moon/keystone/common/controller.py
new file mode 100644
index 00000000..bd26b7c4
--- /dev/null
+++ b/keystone-moon/keystone/common/controller.py
@@ -0,0 +1,800 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+import uuid
+
+from oslo_config import cfg
+from oslo_log import log
+import six
+
+from keystone.common import authorization
+from keystone.common import dependency
+from keystone.common import driver_hints
+from keystone.common import utils
+from keystone.common import wsgi
+from keystone import exception
+from keystone.i18n import _, _LW
+from keystone.models import token_model
+
+
+LOG = log.getLogger(__name__)
+CONF = cfg.CONF
+
+
+def v2_deprecated(f):
+ """No-op decorator in preparation for deprecating Identity API v2.
+
+ This is a placeholder for the pending deprecation of v2. The implementation
+ of this decorator can be replaced with::
+
+ from keystone.openstack.common import versionutils
+
+
+ v2_deprecated = versionutils.deprecated(
+ what='v2 API',
+ as_of=versionutils.deprecated.JUNO,
+ in_favor_of='v3 API')
+
+ """
+ return f
+
+
+def _build_policy_check_credentials(self, action, context, kwargs):
+ LOG.debug('RBAC: Authorizing %(action)s(%(kwargs)s)', {
+ 'action': action,
+ 'kwargs': ', '.join(['%s=%s' % (k, kwargs[k]) for k in kwargs])})
+
+ # see if auth context has already been created. If so use it.
+ if ('environment' in context and
+ authorization.AUTH_CONTEXT_ENV in context['environment']):
+ LOG.debug('RBAC: using auth context from the request environment')
+ return context['environment'].get(authorization.AUTH_CONTEXT_ENV)
+
+ # There is no current auth context, build it from the incoming token.
+ # TODO(morganfainberg): Collapse this logic with AuthContextMiddleware
+ # in a sane manner as this just mirrors the logic in AuthContextMiddleware
+ try:
+ LOG.debug('RBAC: building auth context from the incoming auth token')
+ token_ref = token_model.KeystoneToken(
+ token_id=context['token_id'],
+ token_data=self.token_provider_api.validate_token(
+ context['token_id']))
+ # NOTE(jamielennox): whilst this maybe shouldn't be within this
+ # function it would otherwise need to reload the token_ref from
+ # backing store.
+ wsgi.validate_token_bind(context, token_ref)
+ except exception.TokenNotFound:
+ LOG.warning(_LW('RBAC: Invalid token'))
+ raise exception.Unauthorized()
+
+ auth_context = authorization.token_to_auth_context(token_ref)
+
+ return auth_context
+
+
+def protected(callback=None):
+ """Wraps API calls with role based access controls (RBAC).
+
+ This handles both the protection of the API parameters as well as any
+ target entities for single-entity API calls.
+
+ More complex API calls (for example that deal with several different
+ entities) should pass in a callback function, that will be subsequently
+ called to check protection for these multiple entities. This callback
+ function should gather the appropriate entities needed and then call
+ check_protection() in the V3Controller class.
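+
+ A minimal usage sketch (``ExampleController``, ``get_widget`` and
+ ``widget_id`` are hypothetical names, shown only to illustrate how the
+ decorator is applied)::
+
+     class ExampleController(V3Controller):
+         member_name = 'widget'
+         collection_name = 'widgets'
+
+         @protected()
+         def get_widget(self, context, widget_id):
+             # 'identity:get_widget' has already been enforced by the
+             # time this body runs.
+             return {'widget': {'id': widget_id}}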
+
+ """
+ def wrapper(f):
+ @functools.wraps(f)
+ def inner(self, context, *args, **kwargs):
+ if 'is_admin' in context and context['is_admin']:
+ LOG.warning(_LW('RBAC: Bypassing authorization'))
+ elif callback is not None:
+ prep_info = {'f_name': f.__name__,
+ 'input_attr': kwargs}
+ callback(self, context, prep_info, *args, **kwargs)
+ else:
+ action = 'identity:%s' % f.__name__
+ creds = _build_policy_check_credentials(self, action,
+ context, kwargs)
+
+ policy_dict = {}
+
+ # Check to see if we need to include the target entity in our
+ # policy checks. We deduce this by seeing if the class has
+ # specified a get_member_from_driver() method and that kwargs
+ # contains the appropriate entity id.
+ if (hasattr(self, 'get_member_from_driver') and
+ self.get_member_from_driver is not None):
+ key = '%s_id' % self.member_name
+ if key in kwargs:
+ ref = self.get_member_from_driver(kwargs[key])
+ policy_dict['target'] = {self.member_name: ref}
+
+ # TODO(henry-nash): Move this entire code to a member
+ # method inside v3 Auth
+ if context.get('subject_token_id') is not None:
+ token_ref = token_model.KeystoneToken(
+ token_id=context['subject_token_id'],
+ token_data=self.token_provider_api.validate_token(
+ context['subject_token_id']))
+ policy_dict.setdefault('target', {})
+ policy_dict['target'].setdefault(self.member_name, {})
+ policy_dict['target'][self.member_name]['user_id'] = (
+ token_ref.user_id)
+ try:
+ user_domain_id = token_ref.user_domain_id
+ except exception.UnexpectedError:
+ user_domain_id = None
+ if user_domain_id:
+ policy_dict['target'][self.member_name].setdefault(
+ 'user', {})
+ policy_dict['target'][self.member_name][
+ 'user'].setdefault('domain', {})
+ policy_dict['target'][self.member_name]['user'][
+ 'domain']['id'] = (
+ user_domain_id)
+
+ # Add in the kwargs, which means that any entity provided as a
+ # parameter for calls like create and update will be included.
+ policy_dict.update(kwargs)
+ self.policy_api.enforce(creds,
+ action,
+ utils.flatten_dict(policy_dict))
+ LOG.debug('RBAC: Authorization granted')
+ return f(self, context, *args, **kwargs)
+ return inner
+ return wrapper
+
+
+def filterprotected(*filters):
+ """Wraps filtered API calls with role based access controls (RBAC)."""
+
+ def _filterprotected(f):
+ @functools.wraps(f)
+ def wrapper(self, context, **kwargs):
+ if not context['is_admin']:
+ action = 'identity:%s' % f.__name__
+ creds = _build_policy_check_credentials(self, action,
+ context, kwargs)
+ # Now, build the target dict for policy check. We include:
+ #
+ # - Any query filter parameters
+ # - Data from the main URL (which will be in the kwargs
+ # parameter) and would typically include the primary key
+ # of a get/update/delete call
+ #
+ # First any query filter parameters
+ target = dict()
+ if filters:
+ for item in filters:
+ if item in context['query_string']:
+ target[item] = context['query_string'][item]
+
+ LOG.debug('RBAC: Adding query filter params (%s)', (
+ ', '.join(['%s=%s' % (item, target[item])
+ for item in target])))
+
+ # Now any formal url parameters
+ for key in kwargs:
+ target[key] = kwargs[key]
+
+ self.policy_api.enforce(creds,
+ action,
+ utils.flatten_dict(target))
+
+ LOG.debug('RBAC: Authorization granted')
+ else:
+ LOG.warning(_LW('RBAC: Bypassing authorization'))
+ return f(self, context, filters, **kwargs)
+ return wrapper
+ return _filterprotected
+
+
+class V2Controller(wsgi.Application):
+ """Base controller class for Identity API v2."""
+ def _normalize_domain_id(self, context, ref):
+ """Fill in domain_id since v2 calls are not domain-aware.
+
+ This will overwrite any domain_id that was inadvertently
+ specified in the v2 call.
+
+ """
+ ref['domain_id'] = CONF.identity.default_domain_id
+ return ref
+
+ @staticmethod
+ def filter_domain_id(ref):
+ """Remove domain_id since v2 calls are not domain-aware."""
+ ref.pop('domain_id', None)
+ return ref
+
+ @staticmethod
+ def filter_domain(ref):
+ """Remove domain since v2 calls are not domain-aware.
+
+ V3 Fernet tokens build the user with a domain in the token data.
+ This method ensures that users created in v3 belong to the default
+ domain.
+
+ """
+ if 'domain' in ref:
+ if ref['domain'].get('id') != CONF.identity.default_domain_id:
+ raise exception.Unauthorized(
+ _('Non-default domain is not supported'))
+ del ref['domain']
+ return ref
+
+ @staticmethod
+ def normalize_username_in_response(ref):
+ """Adds username to outgoing user refs to match the v2 spec.
+
+ Internally we use `name` to represent a user's name. The v2 spec
+ requires the use of `username` instead.
+
+ """
+ if 'username' not in ref and 'name' in ref:
+ ref['username'] = ref['name']
+ return ref
+
+ @staticmethod
+ def normalize_username_in_request(ref):
+ """Adds name in incoming user refs to match the v2 spec.
+
+ Internally we use `name` to represent a user's name. The v2 spec
+ requires the use of `username` instead.
+
+ """
+ if 'name' not in ref and 'username' in ref:
+ ref['name'] = ref.pop('username')
+ return ref
+
+ @staticmethod
+ def v3_to_v2_user(ref):
+ """Convert a user_ref from v3 to v2 compatible.
+
+ * v2.0 users are not domain aware, and should have domain_id removed
+ * v2.0 users expect the use of tenantId instead of default_project_id
+ * v2.0 users have a username attribute
+
+ This method should only be applied to user_refs being returned from the
+ v2.0 controller(s).
+
+ If ref is a list type, we will iterate through each element and do the
+ conversion.
+ """
+
+ def _format_default_project_id(ref):
+ """Convert default_project_id to tenantId for v2 calls."""
+ default_project_id = ref.pop('default_project_id', None)
+ if default_project_id is not None:
+ ref['tenantId'] = default_project_id
+ elif 'tenantId' in ref:
+ # NOTE(morganfainberg): To avoid v2.0 confusion if somehow a
+ # tenantId property sneaks its way into the extra blob on the
+ # user, we remove it here. If default_project_id is set, we
+ # would override it in either case.
+ del ref['tenantId']
+
+ def _normalize_and_filter_user_properties(ref):
+ """Run through the various filter/normalization methods."""
+ _format_default_project_id(ref)
+ V2Controller.filter_domain(ref)
+ V2Controller.filter_domain_id(ref)
+ V2Controller.normalize_username_in_response(ref)
+ return ref
+
+ if isinstance(ref, dict):
+ return _normalize_and_filter_user_properties(ref)
+ elif isinstance(ref, list):
+ return [_normalize_and_filter_user_properties(x) for x in ref]
+ else:
+ raise ValueError(_('Expected dict or list: %s') % type(ref))
+
+ def format_project_list(self, tenant_refs, **kwargs):
+ """Format a v2 style project list, including marker/limits."""
+ marker = kwargs.get('marker')
+ first_index = 0
+ if marker is not None:
+ for (marker_index, tenant) in enumerate(tenant_refs):
+ if tenant['id'] == marker:
+ # we start pagination after the marker
+ first_index = marker_index + 1
+ break
+ else:
+ msg = _('Marker could not be found')
+ raise exception.ValidationError(message=msg)
+
+ limit = kwargs.get('limit')
+ last_index = None
+ if limit is not None:
+ try:
+ limit = int(limit)
+ if limit < 0:
+ raise AssertionError()
+ except (ValueError, AssertionError):
+ msg = _('Invalid limit value')
+ raise exception.ValidationError(message=msg)
+ last_index = first_index + limit
+
+ tenant_refs = tenant_refs[first_index:last_index]
+
+ for x in tenant_refs:
+ if 'enabled' not in x:
+ x['enabled'] = True
+ o = {'tenants': tenant_refs,
+ 'tenants_links': []}
+ return o
+
+
+@dependency.requires('policy_api', 'token_provider_api')
+class V3Controller(wsgi.Application):
+ """Base controller class for Identity API v3.
+
+ Child classes should set the ``collection_name`` and ``member_name`` class
+ attributes, representing the collection of entities they are exposing to
+ the API. This is required for supporting self-referential links,
+ pagination, etc.
+
+ Class parameters:
+
+ * `_mutable_parameters` - set of parameters that can be changed by users.
+ Usually used by cls.check_immutable_params()
+ * `_public_parameters` - set of parameters that are exposed to the user.
+ Usually used by cls.filter_params()
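+
+ A minimal subclass sketch (``WidgetV3`` and its attribute values are
+ hypothetical, shown only to illustrate the expected class attributes)::
+
+     @dependency.requires('widget_api')
+     class WidgetV3(V3Controller):
+         collection_name = 'widgets'
+         member_name = 'widget'
+         _mutable_parameters = frozenset(['name', 'description'])
+         _public_parameters = frozenset(['id', 'name', 'description',
+                                         'links'])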
+
+ """
+
+ collection_name = 'entities'
+ member_name = 'entity'
+ get_member_from_driver = None
+
+ @classmethod
+ def base_url(cls, context, path=None):
+ endpoint = super(V3Controller, cls).base_url(context, 'public')
+ if not path:
+ path = cls.collection_name
+
+ return '%s/%s/%s' % (endpoint, 'v3', path.lstrip('/'))
+
+ def get_auth_context(self, context):
+ # TODO(dolphm): this method of accessing the auth context is terrible,
+ # but context needs to be refactored to always have reasonable values.
+ env_context = context.get('environment', {})
+ return env_context.get(authorization.AUTH_CONTEXT_ENV, {})
+
+ @classmethod
+ def full_url(cls, context, path=None):
+ url = cls.base_url(context, path)
+ if context['environment'].get('QUERY_STRING'):
+ url = '%s?%s' % (url, context['environment']['QUERY_STRING'])
+
+ return url
+
+ @classmethod
+ def query_filter_is_true(cls, filter_value):
+ """Determine if bool query param is 'True'.
+
+ We treat this the same way as we do for policy
+ enforcement:
+
+ {bool_param}=0 is treated as False
+
+ Any other value is considered to be equivalent to
+ True, including the absence of a value
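+
+ For example (the values shown are illustrative)::
+
+     query_filter_is_true('0')      # False
+     query_filter_is_true('1')      # True
+     query_filter_is_true('false')  # True - only the string '0' is False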
+
+ """
+
+ if (isinstance(filter_value, six.string_types) and
+ filter_value == '0'):
+ val = False
+ else:
+ val = True
+ return val
+
+ @classmethod
+ def _add_self_referential_link(cls, context, ref):
+ ref.setdefault('links', {})
+ ref['links']['self'] = cls.base_url(context) + '/' + ref['id']
+
+ @classmethod
+ def wrap_member(cls, context, ref):
+ cls._add_self_referential_link(context, ref)
+ return {cls.member_name: ref}
+
+ @classmethod
+ def wrap_collection(cls, context, refs, hints=None):
+ """Wrap a collection, checking for filtering and pagination.
+
+ Returns the wrapped collection, which includes:
+ - Executing any filtering not already carried out
+ - Truncating to a set limit if necessary
+ - Adding 'self' links to every member
+ - Adding 'next', 'self' and 'previous' links for the whole collection.
+
+ :param context: the current context, containing the original url path
+ and query string
+ :param refs: the list of members of the collection
+ :param hints: list hints, containing any relevant filters and limit.
+ Any filters already satisfied by managers will have been
+ removed
+ """
+ # Check if there are any filters in hints that were not
+ # handled by the drivers. The driver will not have paginated or
+ # limited the output if it found there were filters it was unable to
+ # handle.
+
+ if hints is not None:
+ refs = cls.filter_by_attributes(refs, hints)
+
+ list_limited, refs = cls.limit(refs, hints)
+
+ for ref in refs:
+ cls.wrap_member(context, ref)
+
+ container = {cls.collection_name: refs}
+ container['links'] = {
+ 'next': None,
+ 'self': cls.full_url(context, path=context['path']),
+ 'previous': None}
+
+ if list_limited:
+ container['truncated'] = True
+
+ return container
+
+ @classmethod
+ def limit(cls, refs, hints):
+ """Limits a list of entities.
+
+ The underlying driver layer may have already truncated the collection
+ for us, but in case it was unable to handle truncation we check here.
+
+ :param refs: the list of members of the collection
+ :param hints: hints, containing, among other things, the limit
+ requested
+
+ :returns: boolean indicating whether the list was truncated, as well
+ as the list of (truncated if necessary) entities.
+
+ """
+ NOT_LIMITED = False
+ LIMITED = True
+
+ if hints is None or hints.limit is None:
+ # No truncation was requested
+ return NOT_LIMITED, refs
+
+ if hints.limit.get('truncated', False):
+ # The driver did truncate the list
+ return LIMITED, refs
+
+ if len(refs) > hints.limit['limit']:
+ # The driver layer wasn't able to truncate it for us, so we must
+ # do it here
+ return LIMITED, refs[:hints.limit['limit']]
+
+ return NOT_LIMITED, refs
+
+ @classmethod
+ def filter_by_attributes(cls, refs, hints):
+ """Filters a list of references by filter values."""
+
+ def _attr_match(ref_attr, val_attr):
+ """Matches attributes allowing for booleans as strings.
+
+ We explicitly test for a value that defines it as 'False';
+ anything else, including the presence of the attribute with
+ no value, implies 'True'.
+
+ """
+ if type(ref_attr) is bool:
+ return ref_attr == utils.attr_as_boolean(val_attr)
+ else:
+ return ref_attr == val_attr
+
+ def _inexact_attr_match(filter, ref):
+ """Applies an inexact filter to a result dict.
+
+ :param filter: the filter in question
+ :param ref: the dict to check
+
+ :returns: True if there is a match
+
+ """
+ comparator = filter['comparator']
+ key = filter['name']
+
+ if key in ref:
+ filter_value = filter['value']
+ target_value = ref[key]
+ if not filter['case_sensitive']:
+ # We only support inexact filters on strings so
+ # it's OK to use lower()
+ filter_value = filter_value.lower()
+ target_value = target_value.lower()
+
+ if comparator == 'contains':
+ return (filter_value in target_value)
+ elif comparator == 'startswith':
+ return target_value.startswith(filter_value)
+ elif comparator == 'endswith':
+ return target_value.endswith(filter_value)
+ else:
+ # We silently ignore unsupported filters
+ return True
+
+ return False
+
+ for filter in hints.filters:
+ if filter['comparator'] == 'equals':
+ attr = filter['name']
+ value = filter['value']
+ refs = [r for r in refs if _attr_match(
+ utils.flatten_dict(r).get(attr), value)]
+ else:
+ # It might be an inexact filter
+ refs = [r for r in refs if _inexact_attr_match(
+ filter, r)]
+
+ return refs
+
+ @classmethod
+ def build_driver_hints(cls, context, supported_filters):
+ """Build list hints based on the context query string.
+
+ :param context: contains the query_string from which any list hints can
+ be extracted
+ :param supported_filters: list of filters supported, so ignore any
+ keys in query_dict that are not in this list.
+
+ """
+ query_dict = context['query_string']
+ hints = driver_hints.Hints()
+
+ if query_dict is None:
+ return hints
+
+ for key in query_dict:
+ # Check if this is an exact filter
+ if supported_filters is None or key in supported_filters:
+ hints.add_filter(key, query_dict[key])
+ continue
+
+ # Check if it is an inexact filter
+ for valid_key in supported_filters:
+ # See if this entry in query_dict matches a known key with an
+ # inexact suffix added. If it doesn't match, then that just
+ # means that there is no inexact filter for that key in this
+ # query.
+ if not key.startswith(valid_key + '__'):
+ continue
+
+ base_key, comparator = key.split('__', 1)
+
+ # We map a query-style inexact filter of, for example:
+ #
+ # {'email__contains': 'myISP'}
+ #
+ # into an add_filter() call with parameters of:
+ #
+ # name = 'email'
+ # value = 'myISP'
+ # comparator = 'contains'
+ # case_sensitive = True
+
+ case_sensitive = True
+ if comparator.startswith('i'):
+ case_sensitive = False
+ comparator = comparator[1:]
+ hints.add_filter(base_key, query_dict[key],
+ comparator=comparator,
+ case_sensitive=case_sensitive)
+
+ # NOTE(henry-nash): If we were to support pagination, we would pull any
+ # pagination directives out of the query_dict here, and add them into
+ # the hints list.
+ return hints
+
+ def _require_matching_id(self, value, ref):
+ """Ensures the value matches the reference's ID, if any."""
+ if 'id' in ref and ref['id'] != value:
+ raise exception.ValidationError('Cannot change ID')
+
+ def _require_matching_domain_id(self, ref_id, ref, get_member):
+ """Ensure the current domain ID matches the reference one, if any.
+
+ Provided we want domain IDs to be immutable, check whether any
+ domain_id specified in the ref dictionary matches the existing
+ domain_id for this entity.
+
+ :param ref_id: the ID of the entity
+ :param ref: the dictionary of new values proposed for this entity
+ :param get_member: The member function to call to get the current
+ entity
+ :raises: :class:`keystone.exception.ValidationError`
+
+ """
+ # TODO(henry-nash): It might be safer and more efficient to do this
+ # check in the managers affected, so look to migrate this check to
+ # there in the future.
+ if CONF.domain_id_immutable and 'domain_id' in ref:
+ existing_ref = get_member(ref_id)
+ if ref['domain_id'] != existing_ref['domain_id']:
+ raise exception.ValidationError(_('Cannot change Domain ID'))
+
+ def _assign_unique_id(self, ref):
+ """Generates and assigns a unique identifier to a reference."""
+ ref = ref.copy()
+ ref['id'] = uuid.uuid4().hex
+ return ref
+
+ def _get_domain_id_for_list_request(self, context):
+ """Get the domain_id for a v3 list call.
+
+ If we are running with multiple domain drivers, then the caller must
+ specify a domain_id either as a filter or as part of the token scope.
+
+ """
+ if not CONF.identity.domain_specific_drivers_enabled:
+ # We don't need to specify a domain ID in this case
+ return
+
+ if context['query_string'].get('domain_id') is not None:
+ return context['query_string'].get('domain_id')
+
+ try:
+ token_ref = token_model.KeystoneToken(
+ token_id=context['token_id'],
+ token_data=self.token_provider_api.validate_token(
+ context['token_id']))
+ except KeyError:
+ raise exception.ValidationError(
+ _('domain_id is required as part of entity'))
+ except (exception.TokenNotFound,
+ exception.UnsupportedTokenVersionException):
+ LOG.warning(_LW('Invalid token found while getting domain ID '
+ 'for list request'))
+ raise exception.Unauthorized()
+
+ if token_ref.domain_scoped:
+ return token_ref.domain_id
+ else:
+ LOG.warning(
+ _LW('No domain information specified as part of list request'))
+ raise exception.Unauthorized()
+
+ def _get_domain_id_from_token(self, context):
+ """Get the domain_id for a v3 create call.
+
+ In the case of a v3 create entity call that does not specify a domain
+ ID, the spec says that we should use the domain scoping from the token
+ being used.
+
+ """
+ # We could make this more efficient by loading the domain_id
+ # into the context in the wrapper function above (since
+ # this version of normalize_domain will only be called inside
+ # a v3 protected call). However, this optimization is probably not
+ # worth the duplication of state
+ try:
+ token_ref = token_model.KeystoneToken(
+ token_id=context['token_id'],
+ token_data=self.token_provider_api.validate_token(
+ context['token_id']))
+ except KeyError:
+ # This might happen if we use the Admin token, for instance
+ raise exception.ValidationError(
+ _('A domain-scoped token must be used'))
+ except (exception.TokenNotFound,
+ exception.UnsupportedTokenVersionException):
+ LOG.warning(_LW('Invalid token found while getting domain ID '
+ 'for list request'))
+ raise exception.Unauthorized()
+
+ if token_ref.domain_scoped:
+ return token_ref.domain_id
+ else:
+ # TODO(henry-nash): We should issue an exception here since if
+ # a v3 call does not explicitly specify the domain_id in the
+ # entity, it should be using a domain scoped token. However,
+ # the current tempest heat tests issue a v3 call without this.
+ # This is raised as bug #1283539. Once this is fixed, we
+ # should remove the line below and replace it with an error.
+ return CONF.identity.default_domain_id
+
+ def _normalize_domain_id(self, context, ref):
+ """Fill in domain_id if not specified in a v3 call."""
+ if 'domain_id' not in ref:
+ ref['domain_id'] = self._get_domain_id_from_token(context)
+ return ref
+
+ @staticmethod
+ def filter_domain_id(ref):
+ """Override v2 filter to let domain_id out for v3 calls."""
+ return ref
+
+ def check_protection(self, context, prep_info, target_attr=None):
+ """Provide call protection for complex target attributes.
+
+ As well as including the standard parameters from the original API
+ call (which is passed in prep_info), this call will add in any
+ additional entities or attributes (passed in target_attr), so that
+ they can be referenced by policy rules.
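+
+ A minimal callback sketch (``check_widget_protection`` and ``widget_id``
+ are hypothetical names; this is the kind of callback a controller would
+ pass to ``@protected(callback=...)``)::
+
+     def check_widget_protection(self, context, prep_info, widget_id):
+         ref = self.get_member_from_driver(widget_id)
+         self.check_protection(context, prep_info,
+                               target_attr={self.member_name: ref})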
+
+ """
+ if 'is_admin' in context and context['is_admin']:
+ LOG.warning(_LW('RBAC: Bypassing authorization'))
+ else:
+ action = 'identity:%s' % prep_info['f_name']
+ # TODO(henry-nash) need to log the target attributes as well
+ creds = _build_policy_check_credentials(self, action,
+ context,
+ prep_info['input_attr'])
+ # Build the dict the policy engine will check against from both the
+ # parameters passed into the call we are protecting (which was
+ # stored in the prep_info by protected()), plus the target
+ # attributes provided.
+ policy_dict = {}
+ if target_attr:
+ policy_dict = {'target': target_attr}
+ policy_dict.update(prep_info['input_attr'])
+ self.policy_api.enforce(creds,
+ action,
+ utils.flatten_dict(policy_dict))
+ LOG.debug('RBAC: Authorization granted')
+
+ @classmethod
+ def check_immutable_params(cls, ref):
+ """Raise exception when disallowed parameter is in ref.
+
+ Check whether the ref dictionary representing a request has only
+ mutable parameters included. If not, raise an exception. This method
+ checks only root-level keys from a ref dictionary.
+
+ :param ref: a dictionary representing deserialized request to be
+ stored
+ :raises: :class:`keystone.exception.ImmutableAttributeError`
+
+ """
+ ref_keys = set(ref.keys())
+ blocked_keys = ref_keys.difference(cls._mutable_parameters)
+
+ if not blocked_keys:
+ # No immutable parameters changed
+ return
+
+ exception_args = {'target': cls.__name__,
+ 'attributes': ', '.join(blocked_keys)}
+ raise exception.ImmutableAttributeError(**exception_args)
+
+ @classmethod
+ def filter_params(cls, ref):
+ """Remove unspecified parameters from the dictionary.
+
+ This function removes unspecified parameters from the dictionary. See
+ check_immutable_params for the corresponding function that raises
+ exceptions. This method checks only root-level keys from a ref
+ dictionary.
+
+ :param ref: a dictionary representing deserialized response to be
+ serialized
+ """
+ ref_keys = set(ref.keys())
+ blocked_keys = ref_keys - cls._public_parameters
+ for blocked_param in blocked_keys:
+ del ref[blocked_param]
+ return ref
diff --git a/keystone-moon/keystone/common/dependency.py b/keystone-moon/keystone/common/dependency.py
new file mode 100644
index 00000000..14a68f19
--- /dev/null
+++ b/keystone-moon/keystone/common/dependency.py
@@ -0,0 +1,311 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""This module provides support for dependency injection.
+
+Providers are registered via the ``@provider()`` decorator, and dependencies on
+them are registered with ``@requires()`` or ``@optional()``. Providers are
+available to their consumers via an attribute. See the documentation for the
+individual functions for more detail.
+
+See also:
+
+ https://en.wikipedia.org/wiki/Dependency_injection
+
+"""
+
+import traceback
+
+import six
+
+from keystone.i18n import _
+from keystone import notifications
+
+
+_REGISTRY = {}
+
+_future_dependencies = {}
+_future_optionals = {}
+_factories = {}
+
+
+def _set_provider(name, provider):
+ _original_provider, where_registered = _REGISTRY.get(name, (None, None))
+ if where_registered:
+ raise Exception('%s already has a registered provider, at\n%s' %
+ (name, ''.join(where_registered)))
+ _REGISTRY[name] = (provider, traceback.format_stack())
+
+
+GET_REQUIRED = object()
+GET_OPTIONAL = object()
+
+
+def get_provider(name, optional=GET_REQUIRED):
+ if optional is GET_REQUIRED:
+ return _REGISTRY[name][0]
+ return _REGISTRY.get(name, (None, None))[0]
+
+
+class UnresolvableDependencyException(Exception):
+ """Raised when a required dependency is not resolvable.
+
+ See ``resolve_future_dependencies()`` for more details.
+
+ """
+ def __init__(self, name, targets):
+ msg = _('Unregistered dependency: %(name)s for %(targets)s') % {
+ 'name': name, 'targets': targets}
+ super(UnresolvableDependencyException, self).__init__(msg)
+
+
+def provider(name):
+ """A class decorator used to register providers.
+
+ When ``@provider()`` is used to decorate a class, instances of that class
+ will register themselves as providers for the named dependency. For
+ example, in the code fragment::
+
+ @dependency.provider('foo_api')
+ class Foo:
+ def __init__(self):
+ ...
+
+ ...
+
+ foo = Foo()
+
+ The object ``foo`` will be registered as a provider for ``foo_api``. No
+ more than one such instance should be created; additional instances will
+ replace the previous ones, possibly resulting in different instances being
+ used by different consumers.
+
+ """
+ def wrapper(cls):
+ def wrapped(init):
+ def register_event_callbacks(self):
+ # NOTE(morganfainberg): A provider who has an implicit
+ # dependency on other providers may utilize the event callback
+ # mechanism to react to any changes in those providers. This is
+ # performed within the .provider() mechanism so that we can ensure
+ # that the callback is only ever called once and guaranteed
+ # to be on the properly configured and instantiated backend.
+ if not hasattr(self, 'event_callbacks'):
+ return
+
+ if not isinstance(self.event_callbacks, dict):
+ msg = _('event_callbacks must be a dict')
+ raise ValueError(msg)
+
+ for event in self.event_callbacks:
+ if not isinstance(self.event_callbacks[event], dict):
+ msg = _('event_callbacks[%s] must be a dict') % event
+ raise ValueError(msg)
+ for resource_type in self.event_callbacks[event]:
+ # Make sure we register the provider for each event it
+ # cares to call back.
+ callbacks = self.event_callbacks[event][resource_type]
+ if not callbacks:
+ continue
+ if not hasattr(callbacks, '__iter__'):
+ # ensure the callback information is a list
+ # allowing multiple callbacks to exist
+ callbacks = [callbacks]
+ notifications.register_event_callback(event,
+ resource_type,
+ callbacks)
+
+ def __wrapped_init__(self, *args, **kwargs):
+ """Initialize the wrapped object and add it to the registry."""
+ init(self, *args, **kwargs)
+ _set_provider(name, self)
+ register_event_callbacks(self)
+
+ resolve_future_dependencies(__provider_name=name)
+
+ return __wrapped_init__
+
+ cls.__init__ = wrapped(cls.__init__)
+ _factories[name] = cls
+ return cls
+ return wrapper
+
+
+def _process_dependencies(obj):
+ # Any dependencies that can be resolved immediately are resolved.
+ # Dependencies that cannot be resolved immediately are stored for
+ # resolution in resolve_future_dependencies.
+
+ def process(obj, attr_name, unresolved_in_out):
+ for dependency in getattr(obj, attr_name, []):
+ if dependency not in _REGISTRY:
+ # We don't know about this dependency, so save it for later.
+ unresolved_in_out.setdefault(dependency, []).append(obj)
+ continue
+
+ setattr(obj, dependency, get_provider(dependency))
+
+ process(obj, '_dependencies', _future_dependencies)
+ process(obj, '_optionals', _future_optionals)
+
+
+def requires(*dependencies):
+ """A class decorator used to inject providers into consumers.
+
+ The required providers will be made available to instances of the decorated
+ class via an attribute with the same name as the provider. For example, in
+ the code fragment::
+
+ @dependency.requires('foo_api', 'bar_api')
+ class FooBarClient:
+ def __init__(self):
+ ...
+
+ ...
+
+ client = FooBarClient()
+
+ The object ``client`` will have attributes named ``foo_api`` and
+ ``bar_api``, which are instances of the named providers.
+
+ Objects must not rely on the existence of these attributes until after
+ ``resolve_future_dependencies()`` has been called; they may not exist
+ beforehand.
+
+ Dependencies registered via ``@requires()`` must have providers; if not,
+ an ``UnresolvableDependencyException`` will be raised when
+ ``resolve_future_dependencies()`` is called.
+
+ """
+ def wrapper(self, *args, **kwargs):
+ """Inject each dependency from the registry."""
+ self.__wrapped_init__(*args, **kwargs)
+ _process_dependencies(self)
+
+ def wrapped(cls):
+ """Note the required dependencies on the object for later injection.
+
+ The dependencies of the parent class are combined with that of the
+ child class to create a new set of dependencies.
+
+ """
+ existing_dependencies = getattr(cls, '_dependencies', set())
+ cls._dependencies = existing_dependencies.union(dependencies)
+ if not hasattr(cls, '__wrapped_init__'):
+ cls.__wrapped_init__ = cls.__init__
+ cls.__init__ = wrapper
+ return cls
+
+ return wrapped
+
+
+def optional(*dependencies):
+ """Similar to ``@requires()``, except that the dependencies are optional.
+
+ If no provider is available, the attributes will be set to ``None``.
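+
+ A minimal sketch (``foo_api`` and ``MaybeFoo`` are hypothetical names)::
+
+     @dependency.optional('foo_api')
+     class MaybeFoo(object):
+         def use_foo(self):
+             if self.foo_api is not None:
+                 self.foo_api.do_something()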
+
+ """
+ def wrapper(self, *args, **kwargs):
+ """Inject each dependency from the registry."""
+ self.__wrapped_init__(*args, **kwargs)
+ _process_dependencies(self)
+
+ def wrapped(cls):
+ """Note the optional dependencies on the object for later injection.
+
+ The dependencies of the parent class are combined with that of the
+ child class to create a new set of dependencies.
+
+ """
+ existing_optionals = getattr(cls, '_optionals', set())
+ cls._optionals = existing_optionals.union(dependencies)
+ if not hasattr(cls, '__wrapped_init__'):
+ cls.__wrapped_init__ = cls.__init__
+ cls.__init__ = wrapper
+ return cls
+
+ return wrapped
+
+
+def resolve_future_dependencies(__provider_name=None):
+ """Forces injection of all dependencies.
+
+ Before this function is called, circular dependencies may not have been
+ injected. This function should be called only once, after all global
+ providers are registered. If an object needs to be created after this
+ call, it must not have circular dependencies.
+
+ If any required dependencies are unresolvable, this function will raise an
+ ``UnresolvableDependencyException``.
+
+ Outside of this module, this function should be called with no arguments;
+ the optional argument, ``__provider_name`` is used internally, and should
+ be treated as an implementation detail.
+
+ """
+ new_providers = dict()
+ if __provider_name:
+ # A provider was registered, so take care of any objects depending on
+ # it.
+ targets = _future_dependencies.pop(__provider_name, [])
+ targets.extend(_future_optionals.pop(__provider_name, []))
+
+ for target in targets:
+ setattr(target, __provider_name, get_provider(__provider_name))
+
+ return
+
+ # Resolve optional dependencies, sets the attribute to None if there's no
+ # provider registered.
+ for dependency, targets in six.iteritems(_future_optionals.copy()):
+ provider = get_provider(dependency, optional=GET_OPTIONAL)
+ if provider is None:
+ factory = _factories.get(dependency)
+ if factory:
+ provider = factory()
+ new_providers[dependency] = provider
+ for target in targets:
+ setattr(target, dependency, provider)
+
+ # Resolve future dependencies, raises UnresolvableDependencyException if
+ # there's no provider registered.
+ try:
+ for dependency, targets in six.iteritems(_future_dependencies.copy()):
+ if dependency not in _REGISTRY:
+ # a Class was registered that could fulfill the dependency, but
+ # it has not yet been initialized.
+ factory = _factories.get(dependency)
+ if factory:
+ provider = factory()
+ new_providers[dependency] = provider
+ else:
+ raise UnresolvableDependencyException(dependency, targets)
+
+ for target in targets:
+ setattr(target, dependency, get_provider(dependency))
+ finally:
+ _future_dependencies.clear()
+ return new_providers
+
+
+def reset():
+ """Reset the registry of providers.
+
+ This is useful for unit testing to ensure that tests don't use providers
+ from previous tests.
+ """
+
+ _REGISTRY.clear()
+ _future_dependencies.clear()
+ _future_optionals.clear()
diff --git a/keystone-moon/keystone/common/driver_hints.py b/keystone-moon/keystone/common/driver_hints.py
new file mode 100644
index 00000000..0361e314
--- /dev/null
+++ b/keystone-moon/keystone/common/driver_hints.py
@@ -0,0 +1,65 @@
+# Copyright 2013 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class Hints(object):
+ """Encapsulate driver hints for listing entities.
+
+ Hints are modifiers that affect the return of entities from a
+ list_<entities> operation. They are typically passed to a driver to give
+ direction as to what filtering, pagination or list limiting actions are
+ being requested.
+
+ It is optional for a driver to action some or all of the list hints,
+ but any filters that it does satisfy must be marked as such by
+ removing the filter from the list.
+
+ A Hints object contains filters, which is a list of dicts that can be
+ accessed publicly. It also contains a dict called limit, which
+ indicates the amount of data we want to limit our listing to.
+
+ Each filter term consists of:
+
+ * ``name``: the name of the attribute being matched
+ * ``value``: the value against which it is being matched
+ * ``comparator``: the operation, which can be one of ``equals``,
+ ``contains``, ``startswith`` or ``endswith``
+ * ``case_sensitive``: whether any comparison should take account of
+ case
+ * ``type``: will always be 'filter'
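+
+ A minimal usage sketch (the attribute names filtered on are illustrative)::
+
+     hints = Hints()
+     hints.add_filter('enabled', True)                        # exact match
+     hints.add_filter('name', 'test', comparator='contains')  # inexact match
+     hints.set_limit(100)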
+
+ """
+ def __init__(self):
+ self.limit = None
+ self.filters = list()
+
+ def add_filter(self, name, value, comparator='equals',
+ case_sensitive=False):
+ """Adds a filter to the filters list, which is publicly accessible."""
+ self.filters.append({'name': name, 'value': value,
+ 'comparator': comparator,
+ 'case_sensitive': case_sensitive,
+ 'type': 'filter'})
+
+ def get_exact_filter_by_name(self, name):
+ """Return a filter key and value if exact filter exists for name."""
+ for entry in self.filters:
+ if (entry['type'] == 'filter' and entry['name'] == name and
+ entry['comparator'] == 'equals'):
+ return entry
+
+ def set_limit(self, limit, truncated=False):
+ """Set a limit to indicate the list should be truncated."""
+ self.limit = {'limit': limit, 'type': 'limit', 'truncated': truncated}
diff --git a/keystone-moon/keystone/common/environment/__init__.py b/keystone-moon/keystone/common/environment/__init__.py
new file mode 100644
index 00000000..da1de890
--- /dev/null
+++ b/keystone-moon/keystone/common/environment/__init__.py
@@ -0,0 +1,100 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+import os
+
+from oslo_log import log
+
+LOG = log.getLogger(__name__)
+
+
+__all__ = ['Server', 'httplib', 'subprocess']
+
+_configured = False
+
+Server = None
+httplib = None
+subprocess = None
+
+
+def configure_once(name):
+ """Ensure that environment configuration is only run once.
+
+ If the environment is reconfigured in the same way, the call is ignored.
+ It is an error to attempt to reconfigure the environment in a different way.
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ global _configured
+ if _configured:
+ if _configured == name:
+ return
+ else:
+ raise SystemError("Environment has already been "
+ "configured as %s" % _configured)
+
+ LOG.debug("Environment configured as: %s", name)
+ _configured = name
+ return func(*args, **kwargs)
+
+ return wrapper
+ return decorator
+
+
+@configure_once('eventlet')
+def use_eventlet(monkeypatch_thread=None):
+ global httplib, subprocess, Server
+
+ # This must be set before the initial import of eventlet because if
+ # dnspython is present in your environment then eventlet monkeypatches
+ # socket.getaddrinfo() with an implementation which doesn't work for IPv6.
+ os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
+
+ import eventlet
+ from eventlet.green import httplib as _httplib
+ from eventlet.green import subprocess as _subprocess
+
+ from keystone.common.environment import eventlet_server
+
+ if monkeypatch_thread is None:
+ monkeypatch_thread = not os.getenv('STANDARD_THREADS')
+
+ # Raise the default from 8192 to accommodate large tokens
+ eventlet.wsgi.MAX_HEADER_LINE = 16384
+
+ # NOTE(ldbragst): Explicitly declare what should be monkey patched and
+ # what shouldn't. Doing this allows for more readable code when
+ # understanding Eventlet in Keystone. The following is a complete list
+ # of what is monkey patched instead of passing all=False and then passing
+ # module=True to monkey patch a specific module.
+ eventlet.patcher.monkey_patch(os=False, select=True, socket=True,
+ thread=monkeypatch_thread, time=True,
+ psycopg=False, MySQLdb=False)
+
+ Server = eventlet_server.Server
+ httplib = _httplib
+ subprocess = _subprocess
+
+
+@configure_once('stdlib')
+def use_stdlib():
+ global httplib, subprocess
+
+ import httplib as _httplib
+ import subprocess as _subprocess
+
+ httplib = _httplib
+ subprocess = _subprocess
diff --git a/keystone-moon/keystone/common/environment/eventlet_server.py b/keystone-moon/keystone/common/environment/eventlet_server.py
new file mode 100644
index 00000000..639e074a
--- /dev/null
+++ b/keystone-moon/keystone/common/environment/eventlet_server.py
@@ -0,0 +1,194 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import errno
+import re
+import socket
+import ssl
+import sys
+
+import eventlet
+import eventlet.wsgi
+import greenlet
+from oslo_log import log
+from oslo_log import loggers
+
+from keystone.i18n import _LE, _LI
+
+
+LOG = log.getLogger(__name__)
+
+ # The size of the pool used to spawn a single green thread in which
+ # the WSGI server is then started. A size of one is enough because, in the
+ # case of several workers, the parent process forks and each child gets a
+ # copy of the pool, which does not include any greenthread object, as the
+ # spawn is done after the fork.
+POOL_SIZE = 1
+
+
+class EventletFilteringLogger(loggers.WritableLogger):
+ # NOTE(morganfainberg): This logger is designed to filter out specific
+ # tracebacks to limit the amount of data that eventlet can log. In the
+ # case of broken sockets (EPIPE and ECONNRESET), we are seeing a huge
+ # volume of data being written to the logs due to ~14+ lines per traceback.
+ # The tracebacks in these cases are, at best, useful for limited debugging
+ # cases.
+ def __init__(self, *args, **kwargs):
+ super(EventletFilteringLogger, self).__init__(*args, **kwargs)
+ self.regex = re.compile(r'errno (%d|%d)' %
+ (errno.EPIPE, errno.ECONNRESET), re.IGNORECASE)
+
+ def write(self, msg):
+ m = self.regex.search(msg)
+ if m:
+ self.logger.log(log.logging.DEBUG, 'Error(%s) writing to socket.',
+ m.group(1))
+ else:
+ self.logger.log(self.level, msg.rstrip())
+
+
+class Server(object):
+ """Server class to manage multiple WSGI sockets and applications."""
+
+ def __init__(self, application, host=None, port=None, keepalive=False,
+ keepidle=None):
+ self.application = application
+ self.host = host or '0.0.0.0'
+ self.port = port or 0
+ # Pool for a green thread in which wsgi server will be running
+ self.pool = eventlet.GreenPool(POOL_SIZE)
+ self.socket_info = {}
+ self.greenthread = None
+ self.do_ssl = False
+ self.cert_required = False
+ self.keepalive = keepalive
+ self.keepidle = keepidle
+ self.socket = None
+
+ def listen(self, key=None, backlog=128):
+ """Create and start listening on socket.
+
+ Call before forking worker processes.
+
+ Raises Exception if this has already been called.
+ """
+
+ # TODO(dims): eventlet's green dns/socket module does not actually
+ # support IPv6 in getaddrinfo(). We need to get around this in the
+ # future or monitor upstream for a fix.
+ # Please refer below link
+ # (https://bitbucket.org/eventlet/eventlet/
+ # src/e0f578180d7d82d2ed3d8a96d520103503c524ec/eventlet/support/
+ # greendns.py?at=0.12#cl-163)
+ info = socket.getaddrinfo(self.host,
+ self.port,
+ socket.AF_UNSPEC,
+ socket.SOCK_STREAM)[0]
+
+ try:
+ self.socket = eventlet.listen(info[-1], family=info[0],
+ backlog=backlog)
+ except EnvironmentError:
+ LOG.error(_LE("Could not bind to %(host)s:%(port)s"),
+ {'host': self.host, 'port': self.port})
+ raise
+
+ LOG.info(_LI('Starting %(arg0)s on %(host)s:%(port)s'),
+ {'arg0': sys.argv[0],
+ 'host': self.host,
+ 'port': self.port})
+
+ def start(self, key=None, backlog=128):
+ """Run a WSGI server with the given application."""
+
+ if self.socket is None:
+ self.listen(key=key, backlog=backlog)
+
+ dup_socket = self.socket.dup()
+ if key:
+ self.socket_info[key] = self.socket.getsockname()
+ # SSL is enabled
+ if self.do_ssl:
+ if self.cert_required:
+ cert_reqs = ssl.CERT_REQUIRED
+ else:
+ cert_reqs = ssl.CERT_NONE
+
+ dup_socket = eventlet.wrap_ssl(dup_socket, certfile=self.certfile,
+ keyfile=self.keyfile,
+ server_side=True,
+ cert_reqs=cert_reqs,
+ ca_certs=self.ca_certs)
+
+ # Optionally enable keepalive on the wsgi socket.
+ if self.keepalive:
+ dup_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
+
+ if self.keepidle is not None:
+ dup_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
+ self.keepidle)
+
+ self.greenthread = self.pool.spawn(self._run,
+ self.application,
+ dup_socket)
+
+ def set_ssl(self, certfile, keyfile=None, ca_certs=None,
+ cert_required=True):
+ self.certfile = certfile
+ self.keyfile = keyfile
+ self.ca_certs = ca_certs
+ self.cert_required = cert_required
+ self.do_ssl = True
+
+ def stop(self):
+ if self.greenthread is not None:
+ self.greenthread.kill()
+
+ def wait(self):
+ """Wait until all servers have completed running."""
+ try:
+ self.pool.waitall()
+ except KeyboardInterrupt:
+ pass
+ except greenlet.GreenletExit:
+ pass
+
+ def reset(self):
+ """Required by the service interface.
+
+ The service interface is used by the launcher when receiving a
+ SIGHUP. The service interface is defined in
+ keystone.openstack.common.service.Service.
+
+ Keystone does not need to do anything here.
+ """
+ pass
+
+ def _run(self, application, socket):
+ """Start a WSGI server with a new green thread pool."""
+ logger = log.getLogger('eventlet.wsgi.server')
+ try:
+ eventlet.wsgi.server(socket, application,
+ log=EventletFilteringLogger(logger),
+ debug=False)
+ except greenlet.GreenletExit:
+ # Wait until all servers have completed running
+ pass
+ except Exception:
+ LOG.exception(_LE('Server error'))
+ raise
diff --git a/keystone-moon/keystone/common/extension.py b/keystone-moon/keystone/common/extension.py
new file mode 100644
index 00000000..b2ea80bc
--- /dev/null
+++ b/keystone-moon/keystone/common/extension.py
@@ -0,0 +1,45 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+ADMIN_EXTENSIONS = {}
+PUBLIC_EXTENSIONS = {}
+
+
+def register_admin_extension(url_prefix, extension_data):
+ """Register extension with collection of admin extensions.
+
+ Extensions register the information here that will show
+ up in the /extensions page as a way to indicate that the extension is
+ active.
+
+ url_prefix: unique key for the extension that will appear in the
+ urls generated by the extension.
+
+ extension_data is a dictionary. The expected fields are:
+ 'name': short, human readable name of the extension
+ 'namespace': xml namespace
+ 'alias': identifier for the extension
+ 'updated': date the extension was last updated
+ 'description': text description of the extension
+ 'links': hyperlinks to documents describing the extension
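+
+ A minimal sketch (the values shown are illustrative only)::
+
+     register_admin_extension(
+         'OS-EXAMPLE',
+         {'name': 'Example Extension',
+          'namespace': 'http://docs.openstack.org/identity/api/ext/'
+                       'OS-EXAMPLE/v1.0',
+          'alias': 'OS-EXAMPLE',
+          'updated': '2015-01-01T12:00:00-00:00',
+          'description': 'An example admin extension.',
+          'links': []})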
+
+ """
+ ADMIN_EXTENSIONS[url_prefix] = extension_data
+
+
+def register_public_extension(url_prefix, extension_data):
+ """Same as register_admin_extension but for public extensions."""
+
+ PUBLIC_EXTENSIONS[url_prefix] = extension_data
diff --git a/keystone-moon/keystone/common/json_home.py b/keystone-moon/keystone/common/json_home.py
new file mode 100644
index 00000000..215d596a
--- /dev/null
+++ b/keystone-moon/keystone/common/json_home.py
@@ -0,0 +1,76 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import six
+
+
+def build_v3_resource_relation(resource_name):
+ return ('http://docs.openstack.org/api/openstack-identity/3/rel/%s' %
+ resource_name)
+
+
+def build_v3_extension_resource_relation(extension_name, extension_version,
+ resource_name):
+ return (
+ 'http://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/rel/%s' %
+ (extension_name, extension_version, resource_name))
+
+
+def build_v3_parameter_relation(parameter_name):
+ return ('http://docs.openstack.org/api/openstack-identity/3/param/%s' %
+ parameter_name)
+
+
+def build_v3_extension_parameter_relation(extension_name, extension_version,
+ parameter_name):
+ return (
+ 'http://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/param/'
+ '%s' % (extension_name, extension_version, parameter_name))
+
+
+class Parameters(object):
+ """Relationships for Common parameters."""
+
+ DOMAIN_ID = build_v3_parameter_relation('domain_id')
+ ENDPOINT_ID = build_v3_parameter_relation('endpoint_id')
+ GROUP_ID = build_v3_parameter_relation('group_id')
+ POLICY_ID = build_v3_parameter_relation('policy_id')
+ PROJECT_ID = build_v3_parameter_relation('project_id')
+ REGION_ID = build_v3_parameter_relation('region_id')
+ ROLE_ID = build_v3_parameter_relation('role_id')
+ SERVICE_ID = build_v3_parameter_relation('service_id')
+ USER_ID = build_v3_parameter_relation('user_id')
+
+
+class Status(object):
+ """Status values supported."""
+
+ DEPRECATED = 'deprecated'
+ EXPERIMENTAL = 'experimental'
+ STABLE = 'stable'
+
+ @classmethod
+ def is_supported(cls, status):
+ return status in [cls.DEPRECATED, cls.EXPERIMENTAL, cls.STABLE]
+
+
+def translate_urls(json_home, new_prefix):
+ """Given a JSON Home document, sticks new_prefix on each of the urls."""
+
+ for dummy_rel, resource in six.iteritems(json_home['resources']):
+ if 'href' in resource:
+ resource['href'] = new_prefix + resource['href']
+ elif 'href-template' in resource:
+ resource['href-template'] = new_prefix + resource['href-template']
diff --git a/keystone-moon/keystone/common/kvs/__init__.py b/keystone-moon/keystone/common/kvs/__init__.py
new file mode 100644
index 00000000..9a406a85
--- /dev/null
+++ b/keystone-moon/keystone/common/kvs/__init__.py
@@ -0,0 +1,33 @@
+# Copyright 2013 Metacloud, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from dogpile.cache import region
+
+from keystone.common.kvs.core import * # noqa
+from keystone.common.kvs.legacy import Base, DictKvs, INMEMDB # noqa
+
+
+# NOTE(morganfainberg): Provided backends are registered here in the __init__
+# for the kvs system. Any out-of-tree backends should be registered via the
+# ``backends`` option in the ``[kvs]`` section of the Keystone configuration
+# file.
+region.register_backend(
+ 'openstack.kvs.Memory',
+ 'keystone.common.kvs.backends.inmemdb',
+ 'MemoryBackend')
+
+region.register_backend(
+ 'openstack.kvs.Memcached',
+ 'keystone.common.kvs.backends.memcached',
+ 'MemcachedBackend')
diff --git a/keystone-moon/keystone/common/kvs/backends/__init__.py b/keystone-moon/keystone/common/kvs/backends/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/common/kvs/backends/__init__.py
diff --git a/keystone-moon/keystone/common/kvs/backends/inmemdb.py b/keystone-moon/keystone/common/kvs/backends/inmemdb.py
new file mode 100644
index 00000000..68072ef4
--- /dev/null
+++ b/keystone-moon/keystone/common/kvs/backends/inmemdb.py
@@ -0,0 +1,69 @@
+# Copyright 2013 Metacloud, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Keystone In-Memory Dogpile.cache backend implementation.
+"""
+
+import copy
+
+from dogpile.cache import api
+
+
+NO_VALUE = api.NO_VALUE
+
+
+class MemoryBackend(api.CacheBackend):
+ """A backend that uses a plain dictionary.
+
+ There is no size management, and values which are placed into the
+ dictionary will remain until explicitly removed. Note that Dogpile's
+ expiration of items is based on timestamps and does not remove them from
+ the cache.
+
+ E.g.::
+
+ from dogpile.cache import make_region
+
+ region = make_region().configure(
+ 'openstack.kvs.Memory'
+ )
+ """
+ def __init__(self, arguments):
+ self._db = {}
+
+ def _isolate_value(self, value):
+ if value is not NO_VALUE:
+ return copy.deepcopy(value)
+ return value
+
+ def get(self, key):
+ return self._isolate_value(self._db.get(key, NO_VALUE))
+
+ def get_multi(self, keys):
+ return [self.get(key) for key in keys]
+
+ def set(self, key, value):
+ self._db[key] = self._isolate_value(value)
+
+ def set_multi(self, mapping):
+ for key, value in mapping.items():
+ self.set(key, value)
+
+ def delete(self, key):
+ self._db.pop(key, None)
+
+ def delete_multi(self, keys):
+ for key in keys:
+ self.delete(key)
diff --git a/keystone-moon/keystone/common/kvs/backends/memcached.py b/keystone-moon/keystone/common/kvs/backends/memcached.py
new file mode 100644
index 00000000..db453143
--- /dev/null
+++ b/keystone-moon/keystone/common/kvs/backends/memcached.py
@@ -0,0 +1,188 @@
+# Copyright 2013 Metacloud, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Keystone Memcached dogpile.cache backend implementation.
+"""
+
+import random as _random
+import time
+
+from dogpile.cache import api
+from dogpile.cache.backends import memcached
+from oslo_config import cfg
+from oslo_log import log
+
+from keystone.common.cache.backends import memcache_pool
+from keystone.common import manager
+from keystone import exception
+from keystone.i18n import _
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+NO_VALUE = api.NO_VALUE
+random = _random.SystemRandom()
+
+VALID_DOGPILE_BACKENDS = dict(
+ pylibmc=memcached.PylibmcBackend,
+ bmemcached=memcached.BMemcachedBackend,
+ memcached=memcached.MemcachedBackend,
+ pooled_memcached=memcache_pool.PooledMemcachedBackend)
+
+
+class MemcachedLock(object):
+ """Simple distributed lock using memcached.
+
+ This is an adaptation of the lock featured at
+ http://amix.dk/blog/post/19386
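+
+ A minimal usage sketch (``client_fn`` is a hypothetical callable that
+ returns a memcache client)::
+
+     lock = MemcachedLock(client_fn, 'resource-id',
+                          lock_timeout=30, max_lock_attempts=15)
+     if lock.acquire(wait=False):
+         try:
+             pass  # critical section
+         finally:
+             lock.release()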
+
+ """
+ def __init__(self, client_fn, key, lock_timeout, max_lock_attempts):
+ self.client_fn = client_fn
+ self.key = "_lock" + key
+ self.lock_timeout = lock_timeout
+ self.max_lock_attempts = max_lock_attempts
+
+ def acquire(self, wait=True):
+ client = self.client_fn()
+ for i in range(self.max_lock_attempts):
+ if client.add(self.key, 1, self.lock_timeout):
+ return True
+ elif not wait:
+ return False
+ else:
+ sleep_time = random.random()
+ time.sleep(sleep_time)
+ raise exception.UnexpectedError(
+ _('Maximum lock attempts on %s occurred.') % self.key)
+
+ def release(self):
+ client = self.client_fn()
+ client.delete(self.key)
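+
+# Editor's note: illustrative usage sketch, not part of the original module.
+# `get_client` is a hypothetical callable returning a memcache client (in
+# practice the driver's client is used, as in get_mutex() below):
+#
+#     lock = MemcachedLock(get_client, 'revocation-list',
+#                          lock_timeout=5, max_lock_attempts=3)
+#     if lock.acquire(wait=False):
+#         try:
+#             ...  # mutate the value protected by the lock
+#         finally:
+#             lock.release()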
+
+
+class MemcachedBackend(manager.Manager):
+ """Pivot point to leverage the various dogpile.cache memcached backends.
+
+ To select a specific dogpile.cache memcached driver, pass the argument
+ `memcached_backend` set to one of the provided backends (at this time
+ `memcached`, `bmemcached`, `pylibmc` and `pooled_memcached` are valid).
+ """
+ def __init__(self, arguments):
+ self._key_mangler = None
+ self.raw_no_expiry_keys = set(arguments.pop('no_expiry_keys', set()))
+ self.no_expiry_hashed_keys = set()
+
+ self.lock_timeout = arguments.pop('lock_timeout', None)
+ self.max_lock_attempts = arguments.pop('max_lock_attempts', 15)
+ # NOTE(morganfainberg): Remove distributed locking from the arguments
+ # passed to the "real" backend if it exists.
+ arguments.pop('distributed_lock', None)
+ backend = arguments.pop('memcached_backend', None)
+ if 'url' not in arguments:
+ # FIXME(morganfainberg): Log deprecation warning for old-style
+ # configuration once full dict_config style configuration for
+ # KVS backends is supported. For now use the current memcache
+ # section of the configuration.
+ arguments['url'] = CONF.memcache.servers
+
+ if backend is None:
+ # NOTE(morganfainberg): Use the basic memcached backend if nothing
+ # else is supplied.
+ self.driver = VALID_DOGPILE_BACKENDS['memcached'](arguments)
+ else:
+ if backend not in VALID_DOGPILE_BACKENDS:
+ raise ValueError(
+ _('Backend `%(driver)s` is not a valid memcached '
+ 'backend. Valid drivers: %(driver_list)s') %
+ {'driver': backend,
+ 'driver_list': ','.join(VALID_DOGPILE_BACKENDS.keys())})
+ else:
+ self.driver = VALID_DOGPILE_BACKENDS[backend](arguments)
+
+ def _get_set_arguments_driver_attr(self, exclude_expiry=False):
+
+ # NOTE(morganfainberg): Shallow copy the .set_arguments dict to
+ # ensure no changes cause the values to change in the instance
+ # variable.
+ set_arguments = getattr(self.driver, 'set_arguments', {}).copy()
+
+ if exclude_expiry:
+ # NOTE(morganfainberg): Explicitly strip out the 'time' key/value
+ # from the set_arguments in the case that this key isn't meant
+ # to expire
+ set_arguments.pop('time', None)
+ return set_arguments
+
+ def set(self, key, value):
+ mapping = {key: value}
+ self.set_multi(mapping)
+
+ def set_multi(self, mapping):
+ mapping_keys = set(mapping.keys())
+ no_expiry_keys = mapping_keys.intersection(self.no_expiry_hashed_keys)
+ has_expiry_keys = mapping_keys.difference(self.no_expiry_hashed_keys)
+
+ if no_expiry_keys:
+ # NOTE(morganfainberg): For keys that have expiry excluded,
+ # bypass the backend and directly call the client. Bypassing the
+ # backend is required because the 'set_arguments' are applied to
+ # all ``set`` and ``set_multi`` calls by the driver; by calling
+ # the client directly it is possible to exclude the ``time``
+ # argument to the memcached server.
+ new_mapping = {k: mapping[k] for k in no_expiry_keys}
+ set_arguments = self._get_set_arguments_driver_attr(
+ exclude_expiry=True)
+ self.driver.client.set_multi(new_mapping, **set_arguments)
+
+ if has_expiry_keys:
+ new_mapping = {k: mapping[k] for k in has_expiry_keys}
+ self.driver.set_multi(new_mapping)
+
+ @classmethod
+ def from_config_dict(cls, config_dict, prefix):
+ prefix_len = len(prefix)
+ return cls(
+ {key[prefix_len:]: config_dict[key] for key in config_dict
+ if key.startswith(prefix)})
+
+ @property
+ def key_mangler(self):
+ if self._key_mangler is None:
+ self._key_mangler = self.driver.key_mangler
+ return self._key_mangler
+
+ @key_mangler.setter
+ def key_mangler(self, key_mangler):
+ if callable(key_mangler):
+ self._key_mangler = key_mangler
+ self._rehash_keys()
+ elif key_mangler is None:
+ # NOTE(morganfainberg): Set the hashed key map to the unhashed
+ # list since we no longer have a key_mangler.
+ self._key_mangler = None
+ self.no_expiry_hashed_keys = self.raw_no_expiry_keys
+ else:
+ raise TypeError(_('`key_mangler` functions must be callable.'))
+
+ def _rehash_keys(self):
+ no_expire = set()
+ for key in self.raw_no_expiry_keys:
+ no_expire.add(self._key_mangler(key))
+ self.no_expiry_hashed_keys = no_expire
+
+ def get_mutex(self, key):
+ return MemcachedLock(lambda: self.driver.client, key,
+ self.lock_timeout, self.max_lock_attempts)
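+
+# Editor's note: illustrative sketch of from_config_dict(), not part of the
+# original module; the keys and prefix below are hypothetical. The method
+# strips the prefix from matching keys and passes the remainder to __init__:
+#
+#     MemcachedBackend.from_config_dict(
+#         {'keystone.kvs.arguments.url': ['127.0.0.1:11211'],
+#          'keystone.kvs.arguments.memcached_backend': 'pooled_memcached'},
+#         'keystone.kvs.arguments.')
+#     # equivalent to:
+#     # MemcachedBackend({'url': ['127.0.0.1:11211'],
+#     #                   'memcached_backend': 'pooled_memcached'})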
diff --git a/keystone-moon/keystone/common/kvs/core.py b/keystone-moon/keystone/common/kvs/core.py
new file mode 100644
index 00000000..cbbb7462
--- /dev/null
+++ b/keystone-moon/keystone/common/kvs/core.py
@@ -0,0 +1,423 @@
+# Copyright 2013 Metacloud, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import threading
+import time
+import weakref
+
+from dogpile.cache import api
+from dogpile.cache import proxy
+from dogpile.cache import region
+from dogpile.cache import util as dogpile_util
+from dogpile.core import nameregistry
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import importutils
+import six
+
+from keystone import exception
+from keystone.i18n import _
+from keystone.i18n import _LI
+from keystone.i18n import _LW
+
+
+__all__ = ['KeyValueStore', 'KeyValueStoreLock', 'LockTimeout',
+ 'get_key_value_store']
+
+
+BACKENDS_REGISTERED = False
+CONF = cfg.CONF
+KEY_VALUE_STORE_REGISTRY = weakref.WeakValueDictionary()
+LOCK_WINDOW = 1
+LOG = log.getLogger(__name__)
+NO_VALUE = api.NO_VALUE
+
+
+def _register_backends():
+ # NOTE(morganfainberg): This function exists to ensure we do not try and
+ # register the backends prior to the configuration object being fully
+ # available. We also need to ensure we do not register a given backend
+ # more than one time. All backends will be prefixed with openstack.kvs
+ # as the "short" name to reference them for configuration purposes. This
+ # function is used in addition to the pre-registered backends in the
+ # __init__ file for the KVS system.
+ global BACKENDS_REGISTERED
+
+ if not BACKENDS_REGISTERED:
+ prefix = 'openstack.kvs.%s'
+ for backend in CONF.kvs.backends:
+ module, cls = backend.rsplit('.', 1)
+ backend_name = prefix % cls
+ LOG.debug(('Registering Dogpile Backend %(backend_path)s as '
+ '%(backend_name)s'),
+ {'backend_path': backend, 'backend_name': backend_name})
+ region.register_backend(backend_name, module, cls)
+ BACKENDS_REGISTERED = True
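+
+# Editor's note: illustrative example only. With a hypothetical
+# CONF.kvs.backends entry such as
+# 'dogpile.cache.backends.memory.MemoryBackend', the loop above registers
+# the class with dogpile.cache under the short name
+# 'openstack.kvs.MemoryBackend' (module 'dogpile.cache.backends.memory',
+# class 'MemoryBackend').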
+
+
+class LockTimeout(exception.UnexpectedError):
+ debug_message_format = _('Lock Timeout occurred for key, %(target)s')
+
+
+class KeyValueStore(object):
+ """Basic KVS manager object to support Keystone Key-Value-Store systems.
+
+ This manager also supports the concept of locking a given key resource to
+ allow for a guaranteed atomic transaction to the backend.
+ """
+ def __init__(self, kvs_region):
+ self.locking = True
+ self._lock_timeout = 0
+ self._region = kvs_region
+ self._security_strategy = None
+ self._secret_key = None
+ self._lock_registry = nameregistry.NameRegistry(self._create_mutex)
+
+ def configure(self, backing_store, key_mangler=None, proxy_list=None,
+ locking=True, **region_config_args):
+ """Configure the KeyValueStore instance.
+
+ :param backing_store: dogpile.cache short name of the region backend
+ :param key_mangler: key_mangler function
+ :param proxy_list: list of proxy classes to apply to the region
+ :param locking: boolean that allows disabling of locking mechanism for
+ this instantiation
+ :param region_config_args: key-word args passed to the dogpile.cache
+ backend for configuration
+ :return:
+ """
+ if self.is_configured:
+ # NOTE(morganfainberg): It is a bad idea to reconfigure a backend;
+ # there are a lot of pitfalls and potential memory leaks that could
+ # occur. By far the best approach is to re-create the KVS object
+ # with the new configuration.
+ raise RuntimeError(_('KVS region %s is already configured. '
+ 'Cannot reconfigure.') % self._region.name)
+
+ self.locking = locking
+ self._lock_timeout = region_config_args.pop(
+ 'lock_timeout', CONF.kvs.default_lock_timeout)
+ self._configure_region(backing_store, **region_config_args)
+ self._set_key_mangler(key_mangler)
+ self._apply_region_proxy(proxy_list)
+
+ @property
+ def is_configured(self):
+ return 'backend' in self._region.__dict__
+
+ def _apply_region_proxy(self, proxy_list):
+ if isinstance(proxy_list, list):
+ proxies = []
+
+ for item in proxy_list:
+ if isinstance(item, str):
+ LOG.debug('Importing class %s as KVS proxy.', item)
+ pxy = importutils.import_class(item)
+ else:
+ pxy = item
+
+ if issubclass(pxy, proxy.ProxyBackend):
+ proxies.append(pxy)
+ else:
+ LOG.warning(_LW('%s is not a dogpile.proxy.ProxyBackend'),
+ pxy.__name__)
+
+ for proxy_cls in reversed(proxies):
+ LOG.info(_LI('Adding proxy \'%(proxy)s\' to KVS %(name)s.'),
+ {'proxy': proxy_cls.__name__,
+ 'name': self._region.name})
+ self._region.wrap(proxy_cls)
+
+ def _assert_configured(self):
+ if 'backend' not in self._region.__dict__:
+ raise exception.UnexpectedError(_('Key Value Store not '
+ 'configured: %s') %
+ self._region.name)
+
+ def _set_keymangler_on_backend(self, key_mangler):
+ try:
+ self._region.backend.key_mangler = key_mangler
+ except Exception as e:
+ # NOTE(morganfainberg): The setting of the key_mangler on the
+ # backend is used to allow the backend to
+ # calculate a hashed key value as needed. Not all backends
+ # require the ability to calculate hashed keys. If the
+ # backend does not support/require this feature log a
+ # debug line and move on otherwise raise the proper exception.
+ # Support of the feature is implied by the existence of the
+ # 'raw_no_expiry_keys' attribute.
+ if not hasattr(self._region.backend, 'raw_no_expiry_keys'):
+ LOG.debug(('Non-expiring keys not supported/required by '
+ '%(region)s backend; unable to set '
+ 'key_mangler for backend: %(err)s'),
+ {'region': self._region.name, 'err': e})
+ else:
+ raise
+
+ def _set_key_mangler(self, key_mangler):
+ # Set the key_mangler that is appropriate for the given region being
+ # configured here. The key_mangler function is called prior to storing
+ # the value(s) in the backend. This is to help prevent collisions and
+ # limit issues such as memcache's limited cache_key size.
+ use_backend_key_mangler = getattr(self._region.backend,
+ 'use_backend_key_mangler', False)
+ if ((key_mangler is None or use_backend_key_mangler) and
+ (self._region.backend.key_mangler is not None)):
+ # NOTE(morganfainberg): Use the configured key_mangler as a first
+ # choice. Second choice would be the key_mangler defined by the
+ # backend itself. Finally, fall back to the defaults. The one
+ # exception is if the backend defines `use_backend_key_mangler`
+ # as True, which indicates the backend's key_mangler should be
+ # the first choice.
+ key_mangler = self._region.backend.key_mangler
+
+ if CONF.kvs.enable_key_mangler:
+ if key_mangler is not None:
+ msg = _LI('Using %(func)s as KVS region %(name)s key_mangler')
+ if callable(key_mangler):
+ self._region.key_mangler = key_mangler
+ LOG.info(msg, {'func': key_mangler.__name__,
+ 'name': self._region.name})
+ else:
+ # NOTE(morganfainberg): We failed to set the key_mangler,
+ # we should error out here to ensure we aren't causing
+ # key-length or collision issues.
+ raise exception.ValidationError(
+ _('`key_mangler` option must be a function reference'))
+ else:
+ LOG.info(_LI('Using default dogpile sha1_mangle_key as KVS '
+ 'region %s key_mangler'), self._region.name)
+ # NOTE(morganfainberg): Sane 'default' keymangler is the
+ # dogpile sha1_mangle_key function. This ensures that unless
+ # explicitly changed, we mangle keys. This helps to limit
+ # unintended cases of exceeding cache-key in backends such
+ # as memcache.
+ self._region.key_mangler = dogpile_util.sha1_mangle_key
+ self._set_keymangler_on_backend(self._region.key_mangler)
+ else:
+ LOG.info(_LI('KVS region %s key_mangler disabled.'),
+ self._region.name)
+ self._set_keymangler_on_backend(None)
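+
+ # Editor's note (illustrative, not part of the original code): a
+ # key_mangler is any callable mapping a raw key to the string actually
+ # stored by the backend; dogpile's default used above is roughly:
+ #
+ # import hashlib
+ # def sample_mangler(key):
+ # return hashlib.sha1(key.encode('utf-8')).hexdigest()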
+
+ def _configure_region(self, backend, **config_args):
+ prefix = CONF.kvs.config_prefix
+ conf_dict = {}
+ conf_dict['%s.backend' % prefix] = backend
+
+ if 'distributed_lock' not in config_args:
+ config_args['distributed_lock'] = True
+
+ config_args['lock_timeout'] = self._lock_timeout
+
+ # NOTE(morganfainberg): To mitigate race conditions on comparing
+ # the timeout and current time on the lock mutex, we are building
+ # in a static 1 second overlap where the lock will still be valid
+ # in the backend but not from the perspective of the context
+ # manager. Since we must develop to the lowest common denominator
+ # among the backends, and memcache's expiry resolution is no finer
+ # than 1 second, we must build in at least a 1 second overlap.
+ # A `lock_timeout` of 0 means locks never expire.
+ if config_args['lock_timeout'] > 0:
+ config_args['lock_timeout'] += LOCK_WINDOW
+
+ for argument, value in six.iteritems(config_args):
+ arg_key = '.'.join([prefix, 'arguments', argument])
+ conf_dict[arg_key] = value
+
+ LOG.debug('KVS region configuration for %(name)s: %(config)r',
+ {'name': self._region.name, 'config': conf_dict})
+ self._region.configure_from_config(conf_dict, '%s.' % prefix)
+
+ def _mutex(self, key):
+ return self._lock_registry.get(key)
+
+ def _create_mutex(self, key):
+ mutex = self._region.backend.get_mutex(key)
+ if mutex is not None:
+ return mutex
+ else:
+ return self._LockWrapper(lock_timeout=self._lock_timeout)
+
+ class _LockWrapper(object):
+ """weakref-capable threading.Lock wrapper."""
+ def __init__(self, lock_timeout):
+ self.lock = threading.Lock()
+ self.lock_timeout = lock_timeout
+
+ def acquire(self, wait=True):
+ return self.lock.acquire(wait)
+
+ def release(self):
+ self.lock.release()
+
+ def get(self, key):
+ """Get a single value from the KVS backend."""
+ self._assert_configured()
+ value = self._region.get(key)
+ if value is NO_VALUE:
+ raise exception.NotFound(target=key)
+ return value
+
+ def get_multi(self, keys):
+ """Get multiple values in a single call from the KVS backend."""
+ self._assert_configured()
+ values = self._region.get_multi(keys)
+ not_found = []
+ for index, key in enumerate(keys):
+ if values[index] is NO_VALUE:
+ not_found.append(key)
+ if not_found:
+ # NOTE(morganfainberg): If any of the multi-get values are non-
+ # existent, we should raise a NotFound error to mimic the .get()
+ # method's behavior. In all cases the internal dogpile NO_VALUE
+ # should be masked from the consumer of the KeyValueStore.
+ raise exception.NotFound(target=not_found)
+ return values
+
+ def set(self, key, value, lock=None):
+ """Set a single value in the KVS backend."""
+ self._assert_configured()
+ with self._action_with_lock(key, lock):
+ self._region.set(key, value)
+
+ def set_multi(self, mapping):
+ """Set multiple key/value pairs in the KVS backend at once.
+
+ Like delete_multi, this call does not serialize through the
+ KeyValueStoreLock mechanism (locking cannot occur on more than one
+ key in a given context without significant deadlock potential).
+ """
+ self._assert_configured()
+ self._region.set_multi(mapping)
+
+ def delete(self, key, lock=None):
+ """Delete a single key from the KVS backend.
+
+ This method will raise NotFound if the key doesn't exist. The get and
+ delete are done in a single transaction (via KeyValueStoreLock
+ mechanism).
+ """
+ self._assert_configured()
+
+ with self._action_with_lock(key, lock):
+ self.get(key)
+ self._region.delete(key)
+
+ def delete_multi(self, keys):
+ """Delete multiple keys from the KVS backend in a single call.
+
+ Like set_multi, this call does not serialize through the
+ KeyValueStoreLock mechanism (locking cannot occur on more than one
+ key in a given context without significant deadlock potential).
+ """
+ self._assert_configured()
+ self._region.delete_multi(keys)
+
+ def get_lock(self, key):
+ """Get a write lock on the KVS value referenced by `key`.
+
+ The ability to get a context manager to pass into the set/delete
+ methods allows for a single transaction to occur while guaranteeing the
+ backing store will not change between the start of the 'lock' and the
+ end. Lock timeout is fixed to the KeyValueStore configured lock
+ timeout.
+ """
+ self._assert_configured()
+ return KeyValueStoreLock(self._mutex(key), key, self.locking,
+ self._lock_timeout)
+
+ @contextlib.contextmanager
+ def _action_with_lock(self, key, lock=None):
+ """Wrapper context manager to validate and handle the lock and lock
+ timeout if passed in.
+ """
+ if not isinstance(lock, KeyValueStoreLock):
+ # NOTE(morganfainberg): Locking only matters if a lock is passed in
+ # to this method. If lock isn't a KeyValueStoreLock, treat this as
+ # if no locking needs to occur.
+ yield
+ else:
+ if not lock.key == key:
+ raise ValueError(_('Lock key must match target key: %(lock)s '
+ '!= %(target)s') %
+ {'lock': lock.key, 'target': key})
+ if not lock.active:
+ raise exception.ValidationError(_('Must be called within an '
+ 'active lock context.'))
+ if not lock.expired:
+ yield
+ else:
+ raise LockTimeout(target=key)
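+
+ # Editor's note: illustrative usage sketch only (kvs is assumed to be a
+ # configured KeyValueStore instance). A caller needing an atomic
+ # read-modify-write would typically pair get_lock() with set():
+ #
+ # with kvs.get_lock('some-key') as lock:
+ # value = kvs.get('some-key')
+ # ... # modify value
+ # kvs.set('some-key', value, lock=lock)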
+
+
+class KeyValueStoreLock(object):
+ """Basic KeyValueStoreLock context manager that hooks into the
+ dogpile.cache backend mutex allowing for distributed locking on resources.
+
+ This is only a write lock, and will not prevent reads from occurring.
+ """
+ def __init__(self, mutex, key, locking_enabled=True, lock_timeout=0):
+ self.mutex = mutex
+ self.key = key
+ self.enabled = locking_enabled
+ self.lock_timeout = lock_timeout
+ self.active = False
+ self.acquire_time = 0
+
+ def acquire(self):
+ if self.enabled:
+ self.mutex.acquire()
+ LOG.debug('KVS lock acquired for: %s', self.key)
+ self.active = True
+ self.acquire_time = time.time()
+ return self
+
+ __enter__ = acquire
+
+ @property
+ def expired(self):
+ if self.lock_timeout:
+ calculated = time.time() - self.acquire_time + LOCK_WINDOW
+ return calculated > self.lock_timeout
+ else:
+ return False
+
+ def release(self):
+ if self.enabled:
+ self.mutex.release()
+ if not self.expired:
+ LOG.debug('KVS lock released for: %s', self.key)
+ else:
+ LOG.warning(_LW('KVS lock released (timeout reached) for: %s'),
+ self.key)
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.release()
+
+
+def get_key_value_store(name, kvs_region=None):
+ """Instantiate a new :class:`.KeyValueStore` or return a previous
+ instantiation that has the same name.
+ """
+ global KEY_VALUE_STORE_REGISTRY
+
+ _register_backends()
+ key_value_store = KEY_VALUE_STORE_REGISTRY.get(name)
+ if key_value_store is None:
+ if kvs_region is None:
+ kvs_region = region.make_region(name=name)
+ key_value_store = KeyValueStore(kvs_region)
+ KEY_VALUE_STORE_REGISTRY[name] = key_value_store
+ return key_value_store
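+
+# Editor's note: end-to-end usage sketch, not part of the original module.
+# The backend short name 'openstack.kvs.Memory' is an assumption based on
+# the 'openstack.kvs.%s' prefix used by _register_backends() and the
+# pre-registered backends mentioned there; substitute the name actually
+# registered in your deployment:
+#
+#     kvs = get_key_value_store('token-driver')
+#     if not kvs.is_configured:
+#         kvs.configure('openstack.kvs.Memory')
+#     kvs.set('key', 'value')
+#     kvs.get('key')                     # -> 'value'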
diff --git a/keystone-moon/keystone/common/kvs/legacy.py b/keystone-moon/keystone/common/kvs/legacy.py
new file mode 100644
index 00000000..ba036016
--- /dev/null
+++ b/keystone-moon/keystone/common/kvs/legacy.py
@@ -0,0 +1,60 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone import exception
+from keystone.openstack.common import versionutils
+
+
+class DictKvs(dict):
+ def get(self, key, default=None):
+ try:
+ if isinstance(self[key], dict):
+ return self[key].copy()
+ else:
+ return self[key][:]
+ except KeyError:
+ if default is not None:
+ return default
+ raise exception.NotFound(target=key)
+
+ def set(self, key, value):
+ if isinstance(value, dict):
+ self[key] = value.copy()
+ else:
+ self[key] = value[:]
+
+ def delete(self, key):
+ """Deletes an item, returning True on success, False otherwise."""
+ try:
+ del self[key]
+ except KeyError:
+ raise exception.NotFound(target=key)
+
+
+INMEMDB = DictKvs()
+
+
+class Base(object):
+ @versionutils.deprecated(versionutils.deprecated.ICEHOUSE,
+ in_favor_of='keystone.common.kvs.KeyValueStore',
+ remove_in=+2,
+ what='keystone.common.kvs.Base')
+ def __init__(self, db=None):
+ if db is None:
+ db = INMEMDB
+ elif isinstance(db, DictKvs):
+ db = db
+ elif isinstance(db, dict):
+ db = DictKvs(db)
+ self.db = db
diff --git a/keystone-moon/keystone/common/ldap/__init__.py b/keystone-moon/keystone/common/ldap/__init__.py
new file mode 100644
index 00000000..ab5bf4d0
--- /dev/null
+++ b/keystone-moon/keystone/common/ldap/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common.ldap.core import * # noqa
diff --git a/keystone-moon/keystone/common/ldap/core.py b/keystone-moon/keystone/common/ldap/core.py
new file mode 100644
index 00000000..144c0cfd
--- /dev/null
+++ b/keystone-moon/keystone/common/ldap/core.py
@@ -0,0 +1,1910 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+import codecs
+import functools
+import os.path
+import re
+import sys
+import weakref
+
+import ldap.filter
+import ldappool
+from oslo_log import log
+import six
+
+from keystone import exception
+from keystone.i18n import _
+from keystone.i18n import _LW
+
+LOG = log.getLogger(__name__)
+
+LDAP_VALUES = {'TRUE': True, 'FALSE': False}
+CONTROL_TREEDELETE = '1.2.840.113556.1.4.805'
+LDAP_SCOPES = {'one': ldap.SCOPE_ONELEVEL,
+ 'sub': ldap.SCOPE_SUBTREE}
+LDAP_DEREF = {'always': ldap.DEREF_ALWAYS,
+ 'default': None,
+ 'finding': ldap.DEREF_FINDING,
+ 'never': ldap.DEREF_NEVER,
+ 'searching': ldap.DEREF_SEARCHING}
+LDAP_TLS_CERTS = {'never': ldap.OPT_X_TLS_NEVER,
+ 'demand': ldap.OPT_X_TLS_DEMAND,
+ 'allow': ldap.OPT_X_TLS_ALLOW}
+
+
+# RFC 4511 (The LDAP Protocol) defines a list containing only the OID '1.1' to
+# indicate that no attributes should be returned besides the DN.
+DN_ONLY = ['1.1']
+
+_utf8_encoder = codecs.getencoder('utf-8')
+
+
+def utf8_encode(value):
+ """Encode a basestring to UTF-8.
+
+ If the string is unicode, encode it to UTF-8; if the string is
+ str then assume it is already encoded. Otherwise raise a TypeError.
+
+ :param value: A basestring
+ :returns: UTF-8 encoded version of value
+ :raises: TypeError if value is not basestring
+ """
+ if isinstance(value, six.text_type):
+ return _utf8_encoder(value)[0]
+ elif isinstance(value, six.binary_type):
+ return value
+ else:
+ raise TypeError("value must be basestring, "
+ "not %s" % value.__class__.__name__)
+
+_utf8_decoder = codecs.getdecoder('utf-8')
+
+
+def utf8_decode(value):
+ """Decode a from UTF-8 into unicode.
+
+ If the value is a binary string assume it's UTF-8 encoded and decode
+ it into a unicode string. Otherwise convert the value from its
+ type into a unicode string.
+
+ :param value: value to be returned as unicode
+ :returns: value as unicode
+ :raises: UnicodeDecodeError for invalid UTF-8 encoding
+ """
+ if isinstance(value, six.binary_type):
+ return _utf8_decoder(value)[0]
+ return six.text_type(value)
+
+
+def py2ldap(val):
+ """Type convert a Python value to a type accepted by LDAP (unicode).
+
+ The LDAP API only accepts strings for values therefore convert
+ the value's type to a unicode string. A subsequent type conversion
+ will encode the unicode as UTF-8 as required by the python-ldap API,
+ but for now we just want a string representation of the value.
+
+ :param val: The value to convert to a LDAP string representation
+ :returns: unicode string representation of value.
+ """
+ if isinstance(val, bool):
+ return u'TRUE' if val else u'FALSE'
+ else:
+ return six.text_type(val)
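+
+# Editor's illustration of the conversion above (not part of the original
+# module): py2ldap(True) == u'TRUE', py2ldap(42) == u'42',
+# py2ldap('name') == u'name'.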
+
+
+def enabled2py(val):
+ """Similar to ldap2py, only useful for the enabled attribute."""
+
+ try:
+ return LDAP_VALUES[val]
+ except KeyError:
+ pass
+ try:
+ return int(val)
+ except ValueError:
+ pass
+ return utf8_decode(val)
+
+
+def ldap2py(val):
+ """Convert an LDAP formatted value to Python type used by OpenStack.
+
+ Virtually all LDAP values are stored as UTF-8 encoded strings.
+ OpenStack prefers values which are unicode friendly.
+
+ :param val: LDAP formatted value
+ :returns: val converted to preferred Python type
+ """
+ return utf8_decode(val)
+
+
+def convert_ldap_result(ldap_result):
+ """Convert LDAP search result to Python types used by OpenStack.
+
+ Each result tuple is of the form (dn, attrs), where dn is a string
+ containing the DN (distinguished name) of the entry, and attrs is
+ a dictionary containing the attributes associated with the
+ entry. The keys of attrs are strings, and the associated values
+ are lists of strings.
+
+ OpenStack wants to use Python types of its choosing. Strings will
+ be unicode, truth values boolean, whole numbers ints, etc. DNs will
+ also be decoded from UTF-8 to unicode.
+
+ :param ldap_result: LDAP search result
+ :returns: list of 2-tuples containing (dn, attrs) where dn is unicode
+ and attrs is a dict whose values are type converted to
+ OpenStack preferred types.
+ """
+ py_result = []
+ at_least_one_referral = False
+ for dn, attrs in ldap_result:
+ ldap_attrs = {}
+ if dn is None:
+ # this is a Referral object, rather than an Entry object
+ at_least_one_referral = True
+ continue
+
+ for kind, values in six.iteritems(attrs):
+ try:
+ val2py = enabled2py if kind == 'enabled' else ldap2py
+ ldap_attrs[kind] = [val2py(x) for x in values]
+ except UnicodeDecodeError:
+ LOG.debug('Unable to decode value for attribute %s', kind)
+
+ py_result.append((utf8_decode(dn), ldap_attrs))
+ if at_least_one_referral:
+ LOG.debug(('Referrals were returned and ignored. Enable referral '
+ 'chasing in keystone.conf via [ldap] chase_referrals'))
+
+ return py_result
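+
+# Editor's illustration (not part of the original module) of the conversion
+# performed above:
+#
+#     convert_ldap_result(
+#         [('cn=foo,dc=example,dc=com', {'enabled': ['TRUE'],
+#                                        'cn': ['foo']})])
+#     # -> [(u'cn=foo,dc=example,dc=com',
+#     #      {'enabled': [True], 'cn': [u'foo']})]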
+
+
+def safe_iter(attrs):
+ if attrs is None:
+ return
+ elif isinstance(attrs, list):
+ for e in attrs:
+ yield e
+ else:
+ yield attrs
+
+
+def parse_deref(opt):
+ try:
+ return LDAP_DEREF[opt]
+ except KeyError:
+ raise ValueError(_('Invalid LDAP deref option: %(option)s. '
+ 'Choose one of: %(options)s') %
+ {'option': opt,
+ 'options': ', '.join(LDAP_DEREF.keys()), })
+
+
+def parse_tls_cert(opt):
+ try:
+ return LDAP_TLS_CERTS[opt]
+ except KeyError:
+ raise ValueError(_(
+ 'Invalid LDAP TLS certs option: %(option)s. '
+ 'Choose one of: %(options)s') % {
+ 'option': opt,
+ 'options': ', '.join(LDAP_TLS_CERTS.keys())})
+
+
+def ldap_scope(scope):
+ try:
+ return LDAP_SCOPES[scope]
+ except KeyError:
+ raise ValueError(
+ _('Invalid LDAP scope: %(scope)s. Choose one of: %(options)s') % {
+ 'scope': scope,
+ 'options': ', '.join(LDAP_SCOPES.keys())})
+
+
+def prep_case_insensitive(value):
+ """Prepare a string for case-insensitive comparison.
+
+ This is defined in RFC4518. For simplicity, all this function does is
+ lowercase all the characters, strip leading and trailing whitespace,
+ and compress sequences of spaces to a single space.
+ """
+ value = re.sub(r'\s+', ' ', value.strip().lower())
+ return value
+
+
+def is_ava_value_equal(attribute_type, val1, val2):
+ """Returns True if and only if the AVAs are equal.
+
+ When comparing AVAs, the equality matching rule for the attribute type
+ should be taken into consideration. For simplicity, this implementation
+ does a case-insensitive comparison.
+
+ Note that this function uses prep_case_insensitive so the limitations of
+ that function apply here.
+
+ """
+
+ return prep_case_insensitive(val1) == prep_case_insensitive(val2)
+
+
+def is_rdn_equal(rdn1, rdn2):
+ """Returns True if and only if the RDNs are equal.
+
+ * RDNs must have the same number of AVAs.
+ * Each AVA of the RDNs must be equal for the same attribute type. The
+ order isn't significant. Note that an attribute type will only be in one
+ AVA in an RDN, otherwise the DN wouldn't be valid.
+ * Attribute types aren't case sensitive. Note that attribute type
+ comparison is more complicated than implemented here. This function only
+ compares case-insensitively. The code should handle multiple names for an
+ attribute type (e.g., cn, commonName, and 2.5.4.3 are the same).
+
+ Note that this function uses is_ava_value_equal to compare AVAs so the
+ limitations of that function apply here.
+
+ """
+
+ if len(rdn1) != len(rdn2):
+ return False
+
+ for attr_type_1, val1, dummy in rdn1:
+ found = False
+ for attr_type_2, val2, dummy in rdn2:
+ if attr_type_1.lower() != attr_type_2.lower():
+ continue
+
+ found = True
+ if not is_ava_value_equal(attr_type_1, val1, val2):
+ return False
+ break
+ if not found:
+ return False
+
+ return True
+
+
+def is_dn_equal(dn1, dn2):
+ """Returns True if and only if the DNs are equal.
+
+ Two DNs are equal if they've got the same number of RDNs and if the RDNs
+ are the same at each position. See RFC4517.
+
+ Note that this function uses is_rdn_equal to compare RDNs so the
+ limitations of that function apply here.
+
+ :param dn1: Either a string DN or a DN parsed by ldap.dn.str2dn.
+ :param dn2: Either a string DN or a DN parsed by ldap.dn.str2dn.
+
+ """
+
+ if not isinstance(dn1, list):
+ dn1 = ldap.dn.str2dn(utf8_encode(dn1))
+ if not isinstance(dn2, list):
+ dn2 = ldap.dn.str2dn(utf8_encode(dn2))
+
+ if len(dn1) != len(dn2):
+ return False
+
+ for rdn1, rdn2 in zip(dn1, dn2):
+ if not is_rdn_equal(rdn1, rdn2):
+ return False
+ return True
+
+
+def dn_startswith(descendant_dn, dn):
+ """Returns True if and only if the descendant_dn is under the dn.
+
+ :param descendant_dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
+ :param dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
+
+ """
+
+ if not isinstance(descendant_dn, list):
+ descendant_dn = ldap.dn.str2dn(utf8_encode(descendant_dn))
+ if not isinstance(dn, list):
+ dn = ldap.dn.str2dn(utf8_encode(dn))
+
+ if len(descendant_dn) <= len(dn):
+ return False
+
+ # Use the last len(dn) RDNs.
+ return is_dn_equal(descendant_dn[-len(dn):], dn)
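+
+# Editor's illustration (hypothetical DNs, not part of the original module):
+# because attribute types and values are compared case-insensitively,
+#     is_dn_equal('cn=Foo,dc=example,dc=com', 'CN=foo,DC=Example,DC=com')
+# returns True, and
+#     dn_startswith('cn=foo,ou=users,dc=example,dc=com', 'dc=example,dc=com')
+# returns True because the trailing RDNs match the parent DN.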
+
+
+@six.add_metaclass(abc.ABCMeta)
+class LDAPHandler(object):
+ '''Abstract class which defines methods for an LDAP API provider.
+
+ Native Keystone values cannot be passed directly into and from the
+ python-ldap API. Type conversion must occur at the LDAP API
+ boundary; examples of type conversions are:
+
+ * booleans map to the strings 'TRUE' and 'FALSE'
+
+ * integer values map to their string representation.
+
+ * unicode strings are encoded in UTF-8
+
+ In addition to handling type conversions at the API boundary we
+ have the requirement to support more than one LDAP API
+ provider. Currently we have:
+
+ * python-ldap, this is the standard LDAP API for Python, it
+ requires access to a live LDAP server.
+
+ * Fake LDAP which emulates python-ldap. This is used for
+ testing without requiring a live LDAP server.
+
+ To support these requirements we need a layer that performs type
+ conversions and then calls another LDAP API which is configurable
+ (e.g. either python-ldap or the fake emulation).
+
+ We have an additional constraint at the time of this writing due to
+ limitations in the logging module. The logging module is not
+ capable of accepting UTF-8 encoded strings, it will throw an
+ encoding exception. Therefore all logging MUST be performed prior
+ to UTF-8 conversion. This means no logging can be performed in the
+ ldap APIs that implement the python-ldap API because those APIs
+ are defined to accept only UTF-8 strings. Thus the layer which
+ performs type conversions must also do the logging. We do the type
+ conversions in two steps, once to convert all Python types to
+ unicode strings, then log, then convert the unicode strings to
+ UTF-8.
+
+ There are a variety of ways one could accomplish this, we elect to
+ use a chaining technique whereby instances of this class simply
+ call the next member in the chain via the "conn" attribute. The
+ chain is constructed by passing in an existing instance of this
+ class as the conn attribute when the class is instantiated.
+
+ Here is a brief explanation of why other possible approaches were
+ not used:
+
+ subclassing
+
+ To perform the wrapping operations in the correct order
+ the type conversion class would have to subclass each of
+ the API providers. This is awkward, doubles the number of
+ classes, and does not scale well. It requires the type
+ conversion class to be aware of all possible API
+ providers.
+
+ decorators
+
+ Decorators provide an elegant solution to wrap methods and
+ would be an ideal way to perform type conversions before
+ calling the wrapped function and then converting the
+ values returned from the wrapped function. However
+ decorators need to be aware of the method signature, it
+ has to know what input parameters need conversion and how
+ to convert the result. For an API like python-ldap which
+ has a large number of different method signatures it would
+ require a large number of specialized
+ decorators. Experience has shown it's very easy to apply
+ the wrong decorator due to the inherent complexity and
+ tendency to cut-n-paste code. Another option is to
+ parameterize the decorator to make it "smart". Experience
+ has shown such decorators become insanely complicated and
+ difficult to understand and debug. Also, decorators tend to
+ hide what's really going on when a method is called; the
+ operations being performed are not visible when looking at
+ the implementation of a decorated method, which experience
+ has shown also leads to mistakes.
+
+ Chaining simplifies both wrapping to perform type conversion as
+ well as the substitution of alternative API providers. One simply
+ creates a new instance of the API interface and inserts it at the
+ front of the chain. Type conversions are explicit and obvious.
+
+ If a new method needs to be added to the API interface one adds it
+ to the abstract class definition. Should one miss adding the new
+ method to any derivations of the abstract class the code will fail
+ to load and run making it impossible to forget updating all the
+ derived classes.
+ '''
+ @abc.abstractmethod
+ def __init__(self, conn=None):
+ self.conn = conn
+
+ @abc.abstractmethod
+ def connect(self, url, page_size=0, alias_dereferencing=None,
+ use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
+ tls_req_cert='demand', chase_referrals=None, debug_level=None,
+ use_pool=None, pool_size=None, pool_retry_max=None,
+ pool_retry_delay=None, pool_conn_timeout=None,
+ pool_conn_lifetime=None):
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def set_option(self, option, invalue):
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def get_option(self, option):
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def simple_bind_s(self, who='', cred='',
+ serverctrls=None, clientctrls=None):
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def unbind_s(self):
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def add_s(self, dn, modlist):
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def search_s(self, base, scope,
+ filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def search_ext(self, base, scope,
+ filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
+ serverctrls=None, clientctrls=None,
+ timeout=-1, sizelimit=0):
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
+ resp_ctrl_classes=None):
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def modify_s(self, dn, modlist):
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_s(self, dn):
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
+ raise exception.NotImplemented() # pragma: no cover
+
+
+class PythonLDAPHandler(LDAPHandler):
+ '''Implementation of the LDAPHandler interface which calls the
+ python-ldap API.
+
+ Note, the python-ldap API requires all string values to be UTF-8
+ encoded. The KeystoneLDAPHandler enforces this prior to invoking
+ the methods in this class.
+ '''
+
+ def __init__(self, conn=None):
+ super(PythonLDAPHandler, self).__init__(conn=conn)
+
+ def connect(self, url, page_size=0, alias_dereferencing=None,
+ use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
+ tls_req_cert='demand', chase_referrals=None, debug_level=None,
+ use_pool=None, pool_size=None, pool_retry_max=None,
+ pool_retry_delay=None, pool_conn_timeout=None,
+ pool_conn_lifetime=None):
+
+ _common_ldap_initialization(url=url,
+ use_tls=use_tls,
+ tls_cacertfile=tls_cacertfile,
+ tls_cacertdir=tls_cacertdir,
+ tls_req_cert=tls_req_cert,
+ debug_level=debug_level)
+
+ self.conn = ldap.initialize(url)
+ self.conn.protocol_version = ldap.VERSION3
+
+ if alias_dereferencing is not None:
+ self.conn.set_option(ldap.OPT_DEREF, alias_dereferencing)
+ self.page_size = page_size
+
+ if use_tls:
+ self.conn.start_tls_s()
+
+ if chase_referrals is not None:
+ self.conn.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
+
+ def set_option(self, option, invalue):
+ return self.conn.set_option(option, invalue)
+
+ def get_option(self, option):
+ return self.conn.get_option(option)
+
+ def simple_bind_s(self, who='', cred='',
+ serverctrls=None, clientctrls=None):
+ return self.conn.simple_bind_s(who, cred, serverctrls, clientctrls)
+
+ def unbind_s(self):
+ return self.conn.unbind_s()
+
+ def add_s(self, dn, modlist):
+ return self.conn.add_s(dn, modlist)
+
+ def search_s(self, base, scope,
+ filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
+ return self.conn.search_s(base, scope, filterstr,
+ attrlist, attrsonly)
+
+ def search_ext(self, base, scope,
+ filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
+ serverctrls=None, clientctrls=None,
+ timeout=-1, sizelimit=0):
+ return self.conn.search_ext(base, scope,
+ filterstr, attrlist, attrsonly,
+ serverctrls, clientctrls,
+ timeout, sizelimit)
+
+ def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
+ resp_ctrl_classes=None):
+ # The resp_ctrl_classes parameter is a recent addition to the
+ # API. It defaults to None. We do not anticipate using it.
+ # To run with older versions of python-ldap we do not pass it.
+ return self.conn.result3(msgid, all, timeout)
+
+ def modify_s(self, dn, modlist):
+ return self.conn.modify_s(dn, modlist)
+
+ def delete_s(self, dn):
+ return self.conn.delete_s(dn)
+
+ def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
+ return self.conn.delete_ext_s(dn, serverctrls, clientctrls)
+
+
+def _common_ldap_initialization(url, use_tls=False, tls_cacertfile=None,
+ tls_cacertdir=None, tls_req_cert=None,
+ debug_level=None):
+ '''Common LDAP initialization shared by PythonLDAPHandler and
+ PooledLDAPHandler.
+ '''
+
+ LOG.debug("LDAP init: url=%s", url)
+ LOG.debug('LDAP init: use_tls=%s tls_cacertfile=%s tls_cacertdir=%s '
+ 'tls_req_cert=%s tls_avail=%s',
+ use_tls, tls_cacertfile, tls_cacertdir,
+ tls_req_cert, ldap.TLS_AVAIL)
+
+ if debug_level is not None:
+ ldap.set_option(ldap.OPT_DEBUG_LEVEL, debug_level)
+
+ using_ldaps = url.lower().startswith("ldaps")
+
+ if use_tls and using_ldaps:
+ raise AssertionError(_('Invalid TLS / LDAPS combination'))
+
+ # The certificate trust options apply for both LDAPS and TLS.
+ if use_tls or using_ldaps:
+ if not ldap.TLS_AVAIL:
+ raise ValueError(_('Invalid LDAP TLS_AVAIL option: %s. TLS '
+ 'not available') % ldap.TLS_AVAIL)
+ if tls_cacertfile:
+ # NOTE(topol)
+ # python ldap TLS does not verify CACERTFILE or CACERTDIR
+ # so we add some extra simple sanity check verification
+ # Also, setting these values globally (i.e. on the ldap object)
+ # works but these values are ignored when setting them on the
+ # connection
+ if not os.path.isfile(tls_cacertfile):
+ raise IOError(_("tls_cacertfile %s not found "
+ "or is not a file") %
+ tls_cacertfile)
+ ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile)
+ elif tls_cacertdir:
+ # NOTE(topol)
+ # python ldap TLS does not verify CACERTFILE or CACERTDIR
+ # so we add some extra simple sanity check verification
+ # Also, setting these values globally (i.e. on the ldap object)
+ # works but these values are ignored when setting them on the
+ # connection
+ if not os.path.isdir(tls_cacertdir):
+ raise IOError(_("tls_cacertdir %s not found "
+ "or is not a directory") %
+ tls_cacertdir)
+ ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
+ if tls_req_cert in LDAP_TLS_CERTS.values():
+ ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
+ else:
+ LOG.debug("LDAP TLS: invalid TLS_REQUIRE_CERT Option=%s",
+ tls_req_cert)
+
+
+class MsgId(list):
+ '''Wrapper class to hold connection and msgid.'''
+ pass
+
+
+def use_conn_pool(func):
+ '''Use this only for connection-pool-specific LDAP APIs.
+
+ This adds the connection object to the decorated API as the next
+ argument after self.
+ '''
+ def wrapper(self, *args, **kwargs):
+ # assert isinstance(self, PooledLDAPHandler)
+ with self._get_pool_connection() as conn:
+ self._apply_options(conn)
+ return func(self, conn, *args, **kwargs)
+ return wrapper
+
+
+class PooledLDAPHandler(LDAPHandler):
+ '''Implementation of the LDAPHandler interface which uses a pooled
+ connection manager.
+
+ Pool-specific configuration is defined in the [ldap] section; all other
+ LDAP configuration is still read from the [ldap] section as well.
+
+ Keystone LDAP authentication logic authenticates an end user using its DN
+ and password via an LDAP bind to establish that the supplied password is
+ correct. This can fill up the pool quickly (as the pool re-uses existing
+ connections based on their bind data) and would not leave space in the
+ pool for connection re-use by other LDAP operations.
+ A separate pool can therefore be established for those requests when the
+ related flag 'use_auth_pool' is enabled. That pool can have its own size
+ and connection lifetime; the remaining pool attributes are shared between
+ the two pools. If 'use_pool' is disabled, then 'use_auth_pool' does not
+ matter. If 'use_auth_pool' is not enabled, then connection pooling is not
+ used for those authentication requests.
+
+ Note, the python-ldap API requires all string values to be UTF-8
+ encoded. The KeystoneLDAPHandler enforces this prior to invoking
+ the methods in this class.
+ '''
+
+ # Added here to allow override for testing
+ Connector = ldappool.StateConnector
+ auth_pool_prefix = 'auth_pool_'
+
+ connection_pools = {} # static connector pool dict
+
+ def __init__(self, conn=None, use_auth_pool=False):
+ super(PooledLDAPHandler, self).__init__(conn=conn)
+ self.who = ''
+ self.cred = ''
+ self.conn_options = {} # connection specific options
+ self.page_size = None
+ self.use_auth_pool = use_auth_pool
+ self.conn_pool = None
+
+ def connect(self, url, page_size=0, alias_dereferencing=None,
+ use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
+ tls_req_cert='demand', chase_referrals=None, debug_level=None,
+ use_pool=None, pool_size=None, pool_retry_max=None,
+ pool_retry_delay=None, pool_conn_timeout=None,
+ pool_conn_lifetime=None):
+
+ _common_ldap_initialization(url=url,
+ use_tls=use_tls,
+ tls_cacertfile=tls_cacertfile,
+ tls_cacertdir=tls_cacertdir,
+ tls_req_cert=tls_req_cert,
+ debug_level=debug_level)
+
+ self.page_size = page_size
+
+ # Following two options are not added in common initialization as they
+ # need to follow a sequence in PythonLDAPHandler code.
+ if alias_dereferencing is not None:
+ self.set_option(ldap.OPT_DEREF, alias_dereferencing)
+ if chase_referrals is not None:
+ self.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
+
+ if self.use_auth_pool: # separate pool when use_auth_pool enabled
+ pool_url = self.auth_pool_prefix + url
+ else:
+ pool_url = url
+ try:
+ self.conn_pool = self.connection_pools[pool_url]
+ except KeyError:
+ self.conn_pool = ldappool.ConnectionManager(
+ url,
+ size=pool_size,
+ retry_max=pool_retry_max,
+ retry_delay=pool_retry_delay,
+ timeout=pool_conn_timeout,
+ connector_cls=self.Connector,
+ use_tls=use_tls,
+ max_lifetime=pool_conn_lifetime)
+ self.connection_pools[pool_url] = self.conn_pool
+
+ def set_option(self, option, invalue):
+ self.conn_options[option] = invalue
+
+ def get_option(self, option):
+ value = self.conn_options.get(option)
+ # if option was not specified explicitly, then use connection default
+ # value for that option if there.
+ if value is None:
+ with self._get_pool_connection() as conn:
+ value = conn.get_option(option)
+ return value
+
+ def _apply_options(self, conn):
+ # if connection has a lifetime, then it already has options specified
+ if conn.get_lifetime() > 30:
+ return
+ for option, invalue in six.iteritems(self.conn_options):
+ conn.set_option(option, invalue)
+
+ def _get_pool_connection(self):
+ return self.conn_pool.connection(self.who, self.cred)
+
+ def simple_bind_s(self, who='', cred='',
+ serverctrls=None, clientctrls=None):
+ '''Not using use_conn_pool decorator here as this API takes cred as
+ input.
+ '''
+ self.who = who
+ self.cred = cred
+ with self._get_pool_connection() as conn:
+ self._apply_options(conn)
+
+ def unbind_s(self):
+ # After the connection generator's `with` statement execution block
+ # is done, the connection is always released via the finally block in
+ # ldappool, so this unbind is a no-op.
+ pass
+
+ @use_conn_pool
+ def add_s(self, conn, dn, modlist):
+ return conn.add_s(dn, modlist)
+
+ @use_conn_pool
+ def search_s(self, conn, base, scope,
+ filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
+ return conn.search_s(base, scope, filterstr, attrlist,
+ attrsonly)
+
+ def search_ext(self, base, scope,
+ filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
+ serverctrls=None, clientctrls=None,
+ timeout=-1, sizelimit=0):
+ '''This is an asynchronous API which returns a MsgId instance to be
+ used in a later result3 call.
+
+ To work with the result3 API in a predictable manner, the same LDAP
+ connection that provided the msgid is needed, so the connection and
+ msgid are wrapped together in a MsgId instance. The connection
+ associated with search_ext is released once the last hard reference to
+ the MsgId object is freed, which happens when the caller is done using
+ the returned MsgId.
+ '''
+
+ conn_ctxt = self._get_pool_connection()
+ conn = conn_ctxt.__enter__()
+ try:
+ msgid = conn.search_ext(base, scope,
+ filterstr, attrlist, attrsonly,
+ serverctrls, clientctrls,
+ timeout, sizelimit)
+ except Exception:
+ conn_ctxt.__exit__(*sys.exc_info())
+ raise
+ res = MsgId((conn, msgid))
+ weakref.ref(res, functools.partial(conn_ctxt.__exit__,
+ None, None, None))
+ return res
+
+ def result3(self, msgid, all=1, timeout=None,
+ resp_ctrl_classes=None):
+ '''Wait for and return the result of an operation previously
+ initiated by one of the LDAP asynchronous operation routines
+ (e.g. search_ext()), which returned an invocation identifier
+ (a message id) upon successful initiation of the operation.
+
+ The input msgid is expected to be an instance of class MsgId, which
+ holds the LDAP session/connection used to execute search_ext and the
+ message identifier.
+
+ The connection associated with search_ext is released once the last
+ hard reference to the MsgId object is freed. This will happen when the
+ function which requested the msgid and used it in result3 exits.
+ '''
+
+ conn, msg_id = msgid
+ return conn.result3(msg_id, all, timeout)
+
+ @use_conn_pool
+ def modify_s(self, conn, dn, modlist):
+ return conn.modify_s(dn, modlist)
+
+ @use_conn_pool
+ def delete_s(self, conn, dn):
+ return conn.delete_s(dn)
+
+ @use_conn_pool
+ def delete_ext_s(self, conn, dn, serverctrls=None, clientctrls=None):
+ return conn.delete_ext_s(dn, serverctrls, clientctrls)
+
+
+class KeystoneLDAPHandler(LDAPHandler):
+ '''Convert data types and perform logging.
+
+ This LDAP interface wraps the python-ldap based interfaces. The
+ python-ldap interfaces require string values encoded in UTF-8. The
+ OpenStack logging framework at the time of this writing is not
+ capable of accepting strings encoded in UTF-8; the log functions
+ will throw decoding errors if a non-ascii character appears in a
+ string.
+
+ Prior to the call Python data types are converted to a string
+ representation as required by the LDAP APIs.
+
+ Then logging is performed so we can track what is being
+ sent/received from LDAP. Also the logging filters security
+ sensitive items (i.e. passwords).
+
+ Then the string values are encoded into UTF-8.
+
+ Then the LDAP API entry point is invoked.
+
+ Data returned from the LDAP call is converted back from UTF-8
+ encoded strings into the Python data type used internally in
+ OpenStack.
+ '''
+
+ def __init__(self, conn=None):
+ super(KeystoneLDAPHandler, self).__init__(conn=conn)
+ self.page_size = 0
+
+ def __enter__(self):
+ return self
+
+ def _disable_paging(self):
+ # Disable the pagination from now on
+ self.page_size = 0
+
+ def connect(self, url, page_size=0, alias_dereferencing=None,
+ use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
+ tls_req_cert='demand', chase_referrals=None, debug_level=None,
+ use_pool=None, pool_size=None,
+ pool_retry_max=None, pool_retry_delay=None,
+ pool_conn_timeout=None, pool_conn_lifetime=None):
+ self.page_size = page_size
+ return self.conn.connect(url, page_size, alias_dereferencing,
+ use_tls, tls_cacertfile, tls_cacertdir,
+ tls_req_cert, chase_referrals,
+ debug_level=debug_level,
+ use_pool=use_pool,
+ pool_size=pool_size,
+ pool_retry_max=pool_retry_max,
+ pool_retry_delay=pool_retry_delay,
+ pool_conn_timeout=pool_conn_timeout,
+ pool_conn_lifetime=pool_conn_lifetime)
+
+ def set_option(self, option, invalue):
+ return self.conn.set_option(option, invalue)
+
+ def get_option(self, option):
+ return self.conn.get_option(option)
+
+ def simple_bind_s(self, who='', cred='',
+ serverctrls=None, clientctrls=None):
+ LOG.debug("LDAP bind: who=%s", who)
+ who_utf8 = utf8_encode(who)
+ cred_utf8 = utf8_encode(cred)
+ return self.conn.simple_bind_s(who_utf8, cred_utf8,
+ serverctrls=serverctrls,
+ clientctrls=clientctrls)
+
+ def unbind_s(self):
+ LOG.debug("LDAP unbind")
+ return self.conn.unbind_s()
+
+ def add_s(self, dn, modlist):
+ ldap_attrs = [(kind, [py2ldap(x) for x in safe_iter(values)])
+ for kind, values in modlist]
+ logging_attrs = [(kind, values
+ if kind != 'userPassword'
+ else ['****'])
+ for kind, values in ldap_attrs]
+ LOG.debug('LDAP add: dn=%s attrs=%s',
+ dn, logging_attrs)
+ dn_utf8 = utf8_encode(dn)
+ ldap_attrs_utf8 = [(kind, [utf8_encode(x) for x in safe_iter(values)])
+ for kind, values in ldap_attrs]
+ return self.conn.add_s(dn_utf8, ldap_attrs_utf8)
+
+ def search_s(self, base, scope,
+ filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
+ # NOTE(morganfainberg): Remove "None" singletons from this list, which
+ # allows us to set mapped attributes to "None" as defaults in config.
+ # Without this filtering, the ldap query would raise a TypeError since
+ # attrlist is expected to be an iterable of strings.
+ if attrlist is not None:
+ attrlist = [attr for attr in attrlist if attr is not None]
+ LOG.debug('LDAP search: base=%s scope=%s filterstr=%s '
+ 'attrs=%s attrsonly=%s',
+ base, scope, filterstr, attrlist, attrsonly)
+ if self.page_size:
+ ldap_result = self._paged_search_s(base, scope,
+ filterstr, attrlist)
+ else:
+ base_utf8 = utf8_encode(base)
+ filterstr_utf8 = utf8_encode(filterstr)
+ if attrlist is None:
+ attrlist_utf8 = None
+ else:
+ attrlist_utf8 = map(utf8_encode, attrlist)
+ ldap_result = self.conn.search_s(base_utf8, scope,
+ filterstr_utf8,
+ attrlist_utf8, attrsonly)
+
+ py_result = convert_ldap_result(ldap_result)
+
+ return py_result
+
+ def search_ext(self, base, scope,
+ filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
+ serverctrls=None, clientctrls=None,
+ timeout=-1, sizelimit=0):
+ if attrlist is not None:
+ attrlist = [attr for attr in attrlist if attr is not None]
+ LOG.debug('LDAP search_ext: base=%s scope=%s filterstr=%s '
+ 'attrs=%s attrsonly=%s '
+ 'serverctrls=%s clientctrls=%s timeout=%s sizelimit=%s',
+ base, scope, filterstr, attrlist, attrsonly,
+ serverctrls, clientctrls, timeout, sizelimit)
+ return self.conn.search_ext(base, scope,
+ filterstr, attrlist, attrsonly,
+ serverctrls, clientctrls,
+ timeout, sizelimit)
+
+ def _paged_search_s(self, base, scope, filterstr, attrlist=None):
+ res = []
+ use_old_paging_api = False
+ # The API for the simple paged results control changed between
+ # python-ldap 2.3 and 2.4. We need to detect the capabilities
+ # of the python-ldap version we are using.
+ if hasattr(ldap, 'LDAP_CONTROL_PAGE_OID'):
+ use_old_paging_api = True
+ lc = ldap.controls.SimplePagedResultsControl(
+ controlType=ldap.LDAP_CONTROL_PAGE_OID,
+ criticality=True,
+ controlValue=(self.page_size, ''))
+ page_ctrl_oid = ldap.LDAP_CONTROL_PAGE_OID
+ else:
+ lc = ldap.controls.libldap.SimplePagedResultsControl(
+ criticality=True,
+ size=self.page_size,
+ cookie='')
+ page_ctrl_oid = ldap.controls.SimplePagedResultsControl.controlType
+
+ base_utf8 = utf8_encode(base)
+ filterstr_utf8 = utf8_encode(filterstr)
+ if attrlist is None:
+ attrlist_utf8 = None
+ else:
+ attrlist = [attr for attr in attrlist if attr is not None]
+ attrlist_utf8 = map(utf8_encode, attrlist)
+ msgid = self.conn.search_ext(base_utf8,
+ scope,
+ filterstr_utf8,
+ attrlist_utf8,
+ serverctrls=[lc])
+ # Loop requesting pages from the LDAP server until it has no more data
+ while True:
+ # Request to the ldap server a page with 'page_size' entries
+ rtype, rdata, rmsgid, serverctrls = self.conn.result3(msgid)
+ # Receive the data
+ res.extend(rdata)
+ pctrls = [c for c in serverctrls
+ if c.controlType == page_ctrl_oid]
+ if pctrls:
+ # LDAP server supports pagination
+ if use_old_paging_api:
+ est, cookie = pctrls[0].controlValue
+ lc.controlValue = (self.page_size, cookie)
+ else:
+ cookie = lc.cookie = pctrls[0].cookie
+
+ if cookie:
+ # There is more data still on the server
+ # so we request another page
+ msgid = self.conn.search_ext(base_utf8,
+ scope,
+ filterstr_utf8,
+ attrlist_utf8,
+ serverctrls=[lc])
+ else:
+ # Exit condition: no more data on the server
+ break
+ else:
+ LOG.warning(_LW('LDAP Server does not support paging. '
+ 'Disable paging in keystone.conf to '
+ 'avoid this message.'))
+ self._disable_paging()
+ break
+ return res
+
+ def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
+ resp_ctrl_classes=None):
+ ldap_result = self.conn.result3(msgid, all, timeout, resp_ctrl_classes)
+
+ LOG.debug('LDAP result3: msgid=%s all=%s timeout=%s '
+ 'resp_ctrl_classes=%s ldap_result=%s',
+ msgid, all, timeout, resp_ctrl_classes, ldap_result)
+
+ py_result = convert_ldap_result(ldap_result)
+ return py_result
+
+ def modify_s(self, dn, modlist):
+ ldap_modlist = [
+ (op, kind, (None if values is None
+ else [py2ldap(x) for x in safe_iter(values)]))
+ for op, kind, values in modlist]
+
+ logging_modlist = [(op, kind, (values if kind != 'userPassword'
+ else ['****']))
+ for op, kind, values in ldap_modlist]
+ LOG.debug('LDAP modify: dn=%s modlist=%s',
+ dn, logging_modlist)
+
+ dn_utf8 = utf8_encode(dn)
+ ldap_modlist_utf8 = [
+ (op, kind, (None if values is None
+ else [utf8_encode(x) for x in safe_iter(values)]))
+ for op, kind, values in ldap_modlist]
+ return self.conn.modify_s(dn_utf8, ldap_modlist_utf8)
+
+ def delete_s(self, dn):
+ LOG.debug("LDAP delete: dn=%s", dn)
+ dn_utf8 = utf8_encode(dn)
+ return self.conn.delete_s(dn_utf8)
+
+ def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
+ LOG.debug('LDAP delete_ext: dn=%s serverctrls=%s clientctrls=%s',
+ dn, serverctrls, clientctrls)
+ dn_utf8 = utf8_encode(dn)
+ return self.conn.delete_ext_s(dn_utf8, serverctrls, clientctrls)
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.unbind_s()
+
+
+_HANDLERS = {}
+
+
+def register_handler(prefix, handler):
+ _HANDLERS[prefix] = handler
+
+
+def _get_connection(conn_url, use_pool=False, use_auth_pool=False):
+ for prefix, handler in six.iteritems(_HANDLERS):
+ if conn_url.startswith(prefix):
+ return handler()
+
+ if use_pool:
+ return PooledLDAPHandler(use_auth_pool=use_auth_pool)
+ else:
+ return PythonLDAPHandler()
+
+
+def filter_entity(entity_ref):
+ """Filter out private items in an entity dict.
+
+ :param entity_ref: the entity dictionary. The 'dn' field will be removed.
+ 'dn' is used in LDAP, but should not be returned to the user. This
+ value may be modified.
+
+ :returns: entity_ref
+
+ """
+ if entity_ref:
+ entity_ref.pop('dn', None)
+ return entity_ref
+
+
+class BaseLdap(object):
+ DEFAULT_SUFFIX = "dc=example,dc=com"
+ DEFAULT_OU = None
+ DEFAULT_STRUCTURAL_CLASSES = None
+ DEFAULT_ID_ATTR = 'cn'
+ DEFAULT_OBJECTCLASS = None
+ DEFAULT_FILTER = None
+ DEFAULT_EXTRA_ATTR_MAPPING = []
+ DUMB_MEMBER_DN = 'cn=dumb,dc=nonexistent'
+ NotFound = None
+ notfound_arg = None
+ options_name = None
+ model = None
+ attribute_options_names = {}
+ immutable_attrs = []
+ attribute_ignore = []
+ tree_dn = None
+
+ def __init__(self, conf):
+ self.LDAP_URL = conf.ldap.url
+ self.LDAP_USER = conf.ldap.user
+ self.LDAP_PASSWORD = conf.ldap.password
+ self.LDAP_SCOPE = ldap_scope(conf.ldap.query_scope)
+ self.alias_dereferencing = parse_deref(conf.ldap.alias_dereferencing)
+ self.page_size = conf.ldap.page_size
+ self.use_tls = conf.ldap.use_tls
+ self.tls_cacertfile = conf.ldap.tls_cacertfile
+ self.tls_cacertdir = conf.ldap.tls_cacertdir
+ self.tls_req_cert = parse_tls_cert(conf.ldap.tls_req_cert)
+ self.attribute_mapping = {}
+ self.chase_referrals = conf.ldap.chase_referrals
+ self.debug_level = conf.ldap.debug_level
+
+ # LDAP Pool specific attribute
+ self.use_pool = conf.ldap.use_pool
+ self.pool_size = conf.ldap.pool_size
+ self.pool_retry_max = conf.ldap.pool_retry_max
+ self.pool_retry_delay = conf.ldap.pool_retry_delay
+ self.pool_conn_timeout = conf.ldap.pool_connection_timeout
+ self.pool_conn_lifetime = conf.ldap.pool_connection_lifetime
+
+ # End user authentication pool specific config attributes
+ self.use_auth_pool = self.use_pool and conf.ldap.use_auth_pool
+ self.auth_pool_size = conf.ldap.auth_pool_size
+ self.auth_pool_conn_lifetime = conf.ldap.auth_pool_connection_lifetime
+
+ if self.options_name is not None:
+ self.suffix = conf.ldap.suffix
+ if self.suffix is None:
+ self.suffix = self.DEFAULT_SUFFIX
+ dn = '%s_tree_dn' % self.options_name
+ self.tree_dn = (getattr(conf.ldap, dn)
+ or '%s,%s' % (self.DEFAULT_OU, self.suffix))
+
+ idatt = '%s_id_attribute' % self.options_name
+ self.id_attr = getattr(conf.ldap, idatt) or self.DEFAULT_ID_ATTR
+
+ objclass = '%s_objectclass' % self.options_name
+ self.object_class = (getattr(conf.ldap, objclass)
+ or self.DEFAULT_OBJECTCLASS)
+
+ for k, v in six.iteritems(self.attribute_options_names):
+ v = '%s_%s_attribute' % (self.options_name, v)
+ self.attribute_mapping[k] = getattr(conf.ldap, v)
+
+ attr_mapping_opt = ('%s_additional_attribute_mapping' %
+ self.options_name)
+ attr_mapping = (getattr(conf.ldap, attr_mapping_opt)
+ or self.DEFAULT_EXTRA_ATTR_MAPPING)
+ self.extra_attr_mapping = self._parse_extra_attrs(attr_mapping)
+
+ ldap_filter = '%s_filter' % self.options_name
+ self.ldap_filter = getattr(conf.ldap,
+ ldap_filter) or self.DEFAULT_FILTER
+
+ allow_create = '%s_allow_create' % self.options_name
+ self.allow_create = getattr(conf.ldap, allow_create)
+
+ allow_update = '%s_allow_update' % self.options_name
+ self.allow_update = getattr(conf.ldap, allow_update)
+
+ allow_delete = '%s_allow_delete' % self.options_name
+ self.allow_delete = getattr(conf.ldap, allow_delete)
+
+ member_attribute = '%s_member_attribute' % self.options_name
+ self.member_attribute = getattr(conf.ldap, member_attribute, None)
+
+ self.structural_classes = self.DEFAULT_STRUCTURAL_CLASSES
+
+ if self.notfound_arg is None:
+ self.notfound_arg = self.options_name + '_id'
+
+ attribute_ignore = '%s_attribute_ignore' % self.options_name
+ self.attribute_ignore = getattr(conf.ldap, attribute_ignore)
+
+ self.use_dumb_member = conf.ldap.use_dumb_member
+ self.dumb_member = (conf.ldap.dumb_member or
+ self.DUMB_MEMBER_DN)
+
+ self.subtree_delete_enabled = conf.ldap.allow_subtree_delete
+
+ def _not_found(self, object_id):
+ if self.NotFound is None:
+ return exception.NotFound(target=object_id)
+ else:
+ return self.NotFound(**{self.notfound_arg: object_id})
+
+ def _parse_extra_attrs(self, option_list):
+ mapping = {}
+ for item in option_list:
+ try:
+ ldap_attr, attr_map = item.split(':')
+ except Exception:
+ LOG.warn(_LW(
+ 'Invalid additional attribute mapping: "%s". '
+ 'Format must be <ldap_attribute>:<keystone_attribute>'),
+ item)
+ continue
+ mapping[ldap_attr] = attr_map
+ return mapping
+
+ def _is_dumb_member(self, member_dn):
+ """Checks that member is a dumb member.
+
+ :param member_dn: DN of member to be checked.
+ """
+ return (self.use_dumb_member
+ and is_dn_equal(member_dn, self.dumb_member))
+
+ def get_connection(self, user=None, password=None, end_user_auth=False):
+ use_pool = self.use_pool
+ pool_size = self.pool_size
+ pool_conn_lifetime = self.pool_conn_lifetime
+
+ if end_user_auth:
+ if not self.use_auth_pool:
+ use_pool = False
+ else:
+ pool_size = self.auth_pool_size
+ pool_conn_lifetime = self.auth_pool_conn_lifetime
+
+ conn = _get_connection(self.LDAP_URL, use_pool,
+ use_auth_pool=end_user_auth)
+
+ conn = KeystoneLDAPHandler(conn=conn)
+
+ conn.connect(self.LDAP_URL,
+ page_size=self.page_size,
+ alias_dereferencing=self.alias_dereferencing,
+ use_tls=self.use_tls,
+ tls_cacertfile=self.tls_cacertfile,
+ tls_cacertdir=self.tls_cacertdir,
+ tls_req_cert=self.tls_req_cert,
+ chase_referrals=self.chase_referrals,
+ debug_level=self.debug_level,
+ use_pool=use_pool,
+ pool_size=pool_size,
+ pool_retry_max=self.pool_retry_max,
+ pool_retry_delay=self.pool_retry_delay,
+ pool_conn_timeout=self.pool_conn_timeout,
+ pool_conn_lifetime=pool_conn_lifetime
+ )
+
+ if user is None:
+ user = self.LDAP_USER
+
+ if password is None:
+ password = self.LDAP_PASSWORD
+
+ # not all LDAP servers require authentication, so we don't bind
+ # if we don't have any user/pass
+ if user and password:
+ conn.simple_bind_s(user, password)
+
+ return conn
+
+ def _id_to_dn_string(self, object_id):
+ return u'%s=%s,%s' % (self.id_attr,
+ ldap.dn.escape_dn_chars(
+ six.text_type(object_id)),
+ self.tree_dn)
+
+ def _id_to_dn(self, object_id):
+ if self.LDAP_SCOPE == ldap.SCOPE_ONELEVEL:
+ return self._id_to_dn_string(object_id)
+ with self.get_connection() as conn:
+ search_result = conn.search_s(
+ self.tree_dn, self.LDAP_SCOPE,
+ u'(&(%(id_attr)s=%(id)s)(objectclass=%(objclass)s))' %
+ {'id_attr': self.id_attr,
+ 'id': ldap.filter.escape_filter_chars(
+ six.text_type(object_id)),
+ 'objclass': self.object_class},
+ attrlist=DN_ONLY)
+ if search_result:
+ dn, attrs = search_result[0]
+ return dn
+ else:
+ return self._id_to_dn_string(object_id)
+
+ @staticmethod
+ def _dn_to_id(dn):
+ return utf8_decode(ldap.dn.str2dn(utf8_encode(dn))[0][0][1])
+
+ def _ldap_res_to_model(self, res):
+ # LDAP attribute names may be returned in a different case than
+ # they are defined in the mapping, so we need to check for keys
+ # in a case-insensitive way. We use the case specified in the
+ # mapping for the model to ensure we have a predictable way of
+ # retrieving values later.
+ lower_res = {k.lower(): v for k, v in six.iteritems(res[1])}
+
+ id_attrs = lower_res.get(self.id_attr.lower())
+ if not id_attrs:
+ message = _('ID attribute %(id_attr)s not found in LDAP '
+ 'object %(dn)s') % ({'id_attr': self.id_attr,
+ 'dn': res[0]})
+ raise exception.NotFound(message=message)
+ if len(id_attrs) > 1:
+ # FIXME(gyee): if this is a multi-value attribute and it has
+ # multiple values, we can't use it as ID. Retain the dn_to_id
+ # logic here so it does not potentially break existing
+ # deployments. We need to fix our read-write LDAP logic so
+ # it does not get the ID from DN.
+ message = _LW('ID attribute %(id_attr)s for LDAP object %(dn)s '
+ 'has multiple values and therefore cannot be used '
+ 'as an ID. Will get the ID from DN instead') % (
+ {'id_attr': self.id_attr,
+ 'dn': res[0]})
+ LOG.warn(message)
+ id_val = self._dn_to_id(res[0])
+ else:
+ id_val = id_attrs[0]
+ obj = self.model(id=id_val)
+
+ for k in obj.known_keys:
+ if k in self.attribute_ignore:
+ continue
+
+ try:
+ map_attr = self.attribute_mapping.get(k, k)
+ if map_attr is None:
+ # Ignore attributes that are mapped to None.
+ continue
+
+ v = lower_res[map_attr.lower()]
+ except KeyError:
+ pass
+ else:
+ try:
+ obj[k] = v[0]
+ except IndexError:
+ obj[k] = None
+
+ return obj
+
+ def check_allow_create(self):
+ if not self.allow_create:
+ action = _('LDAP %s create') % self.options_name
+ raise exception.ForbiddenAction(action=action)
+
+ def check_allow_update(self):
+ if not self.allow_update:
+ action = _('LDAP %s update') % self.options_name
+ raise exception.ForbiddenAction(action=action)
+
+ def check_allow_delete(self):
+ if not self.allow_delete:
+ action = _('LDAP %s delete') % self.options_name
+ raise exception.ForbiddenAction(action=action)
+
+ def affirm_unique(self, values):
+ if values.get('name') is not None:
+ try:
+ self.get_by_name(values['name'])
+ except exception.NotFound:
+ pass
+ else:
+ raise exception.Conflict(type=self.options_name,
+ details=_('Duplicate name, %s.') %
+ values['name'])
+
+ if values.get('id') is not None:
+ try:
+ self.get(values['id'])
+ except exception.NotFound:
+ pass
+ else:
+ raise exception.Conflict(type=self.options_name,
+ details=_('Duplicate ID, %s.') %
+ values['id'])
+
+ def create(self, values):
+ self.affirm_unique(values)
+ object_classes = self.structural_classes + [self.object_class]
+ attrs = [('objectClass', object_classes)]
+ for k, v in six.iteritems(values):
+ if k in self.attribute_ignore:
+ continue
+ if k == 'id':
+ # no need to check if v is None as 'id' will always have
+ # a value
+ attrs.append((self.id_attr, [v]))
+ elif v is not None:
+ attr_type = self.attribute_mapping.get(k, k)
+ if attr_type is not None:
+ attrs.append((attr_type, [v]))
+ extra_attrs = [attr for attr, name
+ in six.iteritems(self.extra_attr_mapping)
+ if name == k]
+ for attr in extra_attrs:
+ attrs.append((attr, [v]))
+
+ if 'groupOfNames' in object_classes and self.use_dumb_member:
+ attrs.append(('member', [self.dumb_member]))
+ with self.get_connection() as conn:
+ conn.add_s(self._id_to_dn(values['id']), attrs)
+ return values
+
+ def _ldap_get(self, object_id, ldap_filter=None):
+ query = (u'(&(%(id_attr)s=%(id)s)'
+ u'%(filter)s'
+ u'(objectClass=%(object_class)s))'
+ % {'id_attr': self.id_attr,
+ 'id': ldap.filter.escape_filter_chars(
+ six.text_type(object_id)),
+ 'filter': (ldap_filter or self.ldap_filter or ''),
+ 'object_class': self.object_class})
+ with self.get_connection() as conn:
+ try:
+ attrs = list(set(([self.id_attr] +
+ self.attribute_mapping.values() +
+ self.extra_attr_mapping.keys())))
+ res = conn.search_s(self.tree_dn,
+ self.LDAP_SCOPE,
+ query,
+ attrs)
+ except ldap.NO_SUCH_OBJECT:
+ return None
+ try:
+ return res[0]
+ except IndexError:
+ return None
+
+ def _ldap_get_all(self, ldap_filter=None):
+ query = u'(&%s(objectClass=%s))' % (ldap_filter or
+ self.ldap_filter or
+ '', self.object_class)
+ with self.get_connection() as conn:
+ try:
+ attrs = list(set(([self.id_attr] +
+ self.attribute_mapping.values() +
+ self.extra_attr_mapping.keys())))
+ return conn.search_s(self.tree_dn,
+ self.LDAP_SCOPE,
+ query,
+ attrs)
+ except ldap.NO_SUCH_OBJECT:
+ return []
+
+ def _ldap_get_list(self, search_base, scope, query_params=None,
+ attrlist=None):
+ query = u'(objectClass=%s)' % self.object_class
+ if query_params:
+
+ def calc_filter(attrname, value):
+ val_esc = ldap.filter.escape_filter_chars(value)
+ return '(%s=%s)' % (attrname, val_esc)
+
+ query = (u'(&%s%s)' %
+ (query, ''.join([calc_filter(k, v) for k, v in
+ six.iteritems(query_params)])))
+ with self.get_connection() as conn:
+ return conn.search_s(search_base, scope, query, attrlist)
+
+ def get(self, object_id, ldap_filter=None):
+ res = self._ldap_get(object_id, ldap_filter)
+ if res is None:
+ raise self._not_found(object_id)
+ else:
+ return self._ldap_res_to_model(res)
+
+ def get_by_name(self, name, ldap_filter=None):
+ query = (u'(%s=%s)' % (self.attribute_mapping['name'],
+ ldap.filter.escape_filter_chars(
+ six.text_type(name))))
+ res = self.get_all(query)
+ try:
+ return res[0]
+ except IndexError:
+ raise self._not_found(name)
+
+ def get_all(self, ldap_filter=None):
+ return [self._ldap_res_to_model(x)
+ for x in self._ldap_get_all(ldap_filter)]
+
+ def update(self, object_id, values, old_obj=None):
+ if old_obj is None:
+ old_obj = self.get(object_id)
+
+ modlist = []
+ for k, v in six.iteritems(values):
+ if k == 'id':
+ # id can't be modified.
+ continue
+
+ if k in self.attribute_ignore:
+
+ # Handle 'enabled' specially, since we cannot disable an entity
+ # whose 'enabled' attribute is ignored.
+ if k == 'enabled' and (not v):
+ action = _("Disabling an entity where the 'enable' "
+ "attribute is ignored by configuration.")
+ raise exception.ForbiddenAction(action=action)
+
+ continue
+
+ # attribute value has not changed
+ if k in old_obj and old_obj[k] == v:
+ continue
+
+ if k in self.immutable_attrs:
+ msg = (_("Cannot change %(option_name)s %(attr)s") %
+ {'option_name': self.options_name, 'attr': k})
+ raise exception.ValidationError(msg)
+
+ if v is None:
+ if old_obj.get(k) is not None:
+ modlist.append((ldap.MOD_DELETE,
+ self.attribute_mapping.get(k, k),
+ None))
+ continue
+
+ current_value = old_obj.get(k)
+ if current_value is None:
+ op = ldap.MOD_ADD
+ modlist.append((op, self.attribute_mapping.get(k, k), [v]))
+ elif current_value != v:
+ op = ldap.MOD_REPLACE
+ modlist.append((op, self.attribute_mapping.get(k, k), [v]))
+
+ if modlist:
+ with self.get_connection() as conn:
+ try:
+ conn.modify_s(self._id_to_dn(object_id), modlist)
+ except ldap.NO_SUCH_OBJECT:
+ raise self._not_found(object_id)
+
+ return self.get(object_id)
+
+ def delete(self, object_id):
+ with self.get_connection() as conn:
+ try:
+ conn.delete_s(self._id_to_dn(object_id))
+ except ldap.NO_SUCH_OBJECT:
+ raise self._not_found(object_id)
+
+ def deleteTree(self, object_id):
+ tree_delete_control = ldap.controls.LDAPControl(CONTROL_TREEDELETE,
+ 0,
+ None)
+ with self.get_connection() as conn:
+ try:
+ conn.delete_ext_s(self._id_to_dn(object_id),
+ serverctrls=[tree_delete_control])
+ except ldap.NO_SUCH_OBJECT:
+ raise self._not_found(object_id)
+ except ldap.NOT_ALLOWED_ON_NONLEAF:
+ # Most LDAP servers do not support the tree_delete_control.
+ # In these servers, the usual idiom is to first perform a
+ # search to get the entries to delete, then delete them in
+ # order of child to parent, since LDAP forbids the
+ # deletion of a parent entry before deleting the children
+ # of that parent. The simplest way to do that is to delete
+ # the entries in order of the length of the DN, from longest
+ # to shortest DN.
+ dn = self._id_to_dn(object_id)
+ scope = ldap.SCOPE_SUBTREE
+ # With some directory servers, an entry with objectclass
+ # ldapsubentry will not be returned unless it is explicitly
+ # requested, by specifying the objectclass in the search
+ # filter. We must specify this, with objectclass=*, in an
+ # LDAP filter OR clause, in order to return all entries
+ filt = '(|(objectclass=*)(objectclass=ldapsubentry))'
+ # We only need the DNs of the entries. Since no attributes
+ # will be returned, we do not have to specify attrsonly=1.
+ entries = conn.search_s(dn, scope, filt, attrlist=DN_ONLY)
+ if entries:
+ for dn in sorted((e[0] for e in entries),
+ key=len, reverse=True):
+ conn.delete_s(dn)
+ else:
+ LOG.debug('No entries in LDAP subtree %s', dn)
+
+ def add_member(self, member_dn, member_list_dn):
+ """Add member to the member list.
+
+ :param member_dn: DN of member to be added.
+ :param member_list_dn: DN of group to which the
+ member will be added.
+
+ :raises: exception.Conflict: If the user was already a member.
+ self.NotFound: If the group entry didn't exist.
+ """
+ with self.get_connection() as conn:
+ try:
+ mod = (ldap.MOD_ADD, self.member_attribute, member_dn)
+ conn.modify_s(member_list_dn, [mod])
+ except ldap.TYPE_OR_VALUE_EXISTS:
+ raise exception.Conflict(_('Member %(member)s '
+ 'is already a member'
+ ' of group %(group)s') % {
+ 'member': member_dn,
+ 'group': member_list_dn})
+ except ldap.NO_SUCH_OBJECT:
+ raise self._not_found(member_list_dn)
+
+ def remove_member(self, member_dn, member_list_dn):
+ """Remove member from the member list.
+
+ :param member_dn: DN of member to be removed.
+ :param member_list_dn: DN of group from which the
+ member will be removed.
+
+ :raises: self.NotFound: If the group entry didn't exist.
+ ldap.NO_SUCH_ATTRIBUTE: If the user wasn't a member.
+ """
+ with self.get_connection() as conn:
+ try:
+ mod = (ldap.MOD_DELETE, self.member_attribute, member_dn)
+ conn.modify_s(member_list_dn, [mod])
+ except ldap.NO_SUCH_OBJECT:
+ raise self._not_found(member_list_dn)
+
+ def _delete_tree_nodes(self, search_base, scope, query_params=None):
+ query = u'(objectClass=%s)' % self.object_class
+ if query_params:
+ query = (u'(&%s%s)' %
+ (query, ''.join(['(%s=%s)'
+ % (k, ldap.filter.escape_filter_chars(v))
+ for k, v in
+ six.iteritems(query_params)])))
+ not_deleted_nodes = []
+ with self.get_connection() as conn:
+ try:
+ nodes = conn.search_s(search_base, scope, query,
+ attrlist=DN_ONLY)
+ except ldap.NO_SUCH_OBJECT:
+ LOG.debug('Could not find entry with dn=%s', search_base)
+ raise self._not_found(self._dn_to_id(search_base))
+ else:
+ for node_dn, _t in nodes:
+ try:
+ conn.delete_s(node_dn)
+ except ldap.NO_SUCH_OBJECT:
+ not_deleted_nodes.append(node_dn)
+
+ if not_deleted_nodes:
+ LOG.warn(_LW("When deleting entries for %(search_base)s, could not"
+ " delete nonexistent entries %(entries)s%(dots)s"),
+ {'search_base': search_base,
+ 'entries': not_deleted_nodes[:3],
+ 'dots': '...' if len(not_deleted_nodes) > 3 else ''})
+
+ def filter_query(self, hints, query=None):
+ """Applies filtering to a query.
+
+ :param hints: contains the list of filters, which may be None,
+ indicating that there are no filters to be applied.
+ If it's not None, then any filters satisfied here will be
+ removed so that the caller will know if any filters
+ remain to be applied.
+ :param query: LDAP query into which to include filters
+
+ :returns query: LDAP query, updated with any filters satisfied
+
+ """
+ def build_filter(filter_, hints):
+ """Build a filter for the query.
+
+ :param filter_: the dict that describes this filter
+ :param hints: contains the list of filters yet to be satisfied.
+
+ :returns query: LDAP query term to be added
+
+ """
+ ldap_attr = self.attribute_mapping[filter_['name']]
+ val_esc = ldap.filter.escape_filter_chars(filter_['value'])
+
+ if filter_['case_sensitive']:
+ # NOTE(henry-nash): Although dependent on the schema being
+ # used, most LDAP attributes are configured with case
+ # insensitive matching rules, so we'll leave this to the
+ # controller to filter.
+ return
+
+ if filter_['name'] == 'enabled':
+ # NOTE(henry-nash): Due to the different options for storing
+ # the enabled attribute (e.g. emulated or not), for now we
+ # don't try to filter this at the driver level - we simply
+ # leave the filter to be handled by the controller. It seems
+ # unlikely that this will cause a significant performance
+ # issue.
+ return
+
+ # TODO(henry-nash): Currently there are no booleans (other than
+ # 'enabled' that is handled above) on which you can filter. If
+ # there were, we would need to add special handling here to
+ # convert the booleans values to 'TRUE' and 'FALSE'. To do that
+ # we would also need to know which filter keys were actually
+ # booleans (this is related to bug #1411478).
+
+ if filter_['comparator'] == 'equals':
+ query_term = (u'(%(attr)s=%(val)s)'
+ % {'attr': ldap_attr, 'val': val_esc})
+ elif filter_['comparator'] == 'contains':
+ query_term = (u'(%(attr)s=*%(val)s*)'
+ % {'attr': ldap_attr, 'val': val_esc})
+ elif filter_['comparator'] == 'startswith':
+ query_term = (u'(%(attr)s=%(val)s*)'
+ % {'attr': ldap_attr, 'val': val_esc})
+ elif filter_['comparator'] == 'endswith':
+ query_term = (u'(%(attr)s=*%(val)s)'
+ % {'attr': ldap_attr, 'val': val_esc})
+ else:
+ # It's a filter we don't understand, so let the caller
+ # work out if they need to do something with it.
+ return
+
+ return query_term
+
+ if hints is None:
+ return query
+
+ filter_list = []
+ satisfied_filters = []
+
+ for filter_ in hints.filters:
+ if filter_['name'] not in self.attribute_mapping:
+ continue
+ new_filter = build_filter(filter_, hints)
+ if new_filter is not None:
+ filter_list.append(new_filter)
+ satisfied_filters.append(filter_)
+
+ if filter_list:
+ query = u'(&%s%s)' % (query, ''.join(filter_list))
+
+ # Remove satisfied filters so the caller knows which filters remain
+ for filter_ in satisfied_filters:
+ hints.filters.remove(filter_)
+
+ return query
+
+
+class EnabledEmuMixIn(BaseLdap):
+ """Emulates boolean 'enabled' attribute if turned on.
+
+ Creates a groupOfNames entry holding all enabled objects of this class;
+ any missing objects are considered disabled.
+
+ Options:
+
+ * $name_enabled_emulation - boolean, on/off
+ * $name_enabled_emulation_dn - DN of that groupOfNames, default is
+ cn=enabled_${name}s,${tree_dn}
+
+ Where ${name}s is the plural of self.options_name ('users' or 'tenants'),
+ ${tree_dn} is self.tree_dn.
+ """
+
+ def __init__(self, conf):
+ super(EnabledEmuMixIn, self).__init__(conf)
+ enabled_emulation = '%s_enabled_emulation' % self.options_name
+ self.enabled_emulation = getattr(conf.ldap, enabled_emulation)
+
+ enabled_emulation_dn = '%s_enabled_emulation_dn' % self.options_name
+ self.enabled_emulation_dn = getattr(conf.ldap, enabled_emulation_dn)
+ if not self.enabled_emulation_dn:
+ naming_attr_name = 'cn'
+ naming_attr_value = 'enabled_%ss' % self.options_name
+ sub_vals = (naming_attr_name, naming_attr_value, self.tree_dn)
+ self.enabled_emulation_dn = '%s=%s,%s' % sub_vals
+ naming_attr = (naming_attr_name, [naming_attr_value])
+ else:
+ # Extract the attribute name and value from the configured DN.
+ naming_dn = ldap.dn.str2dn(utf8_encode(self.enabled_emulation_dn))
+ naming_rdn = naming_dn[0][0]
+ naming_attr = (utf8_decode(naming_rdn[0]),
+ utf8_decode(naming_rdn[1]))
+ self.enabled_emulation_naming_attr = naming_attr
+
+ def _get_enabled(self, object_id):
+ dn = self._id_to_dn(object_id)
+ query = '(member=%s)' % dn
+ with self.get_connection() as conn:
+ try:
+ enabled_value = conn.search_s(self.enabled_emulation_dn,
+ ldap.SCOPE_BASE,
+ query, ['cn'])
+ except ldap.NO_SUCH_OBJECT:
+ return False
+ else:
+ return bool(enabled_value)
+
+ def _add_enabled(self, object_id):
+ if not self._get_enabled(object_id):
+ modlist = [(ldap.MOD_ADD,
+ 'member',
+ [self._id_to_dn(object_id)])]
+ with self.get_connection() as conn:
+ try:
+ conn.modify_s(self.enabled_emulation_dn, modlist)
+ except ldap.NO_SUCH_OBJECT:
+ attr_list = [('objectClass', ['groupOfNames']),
+ ('member', [self._id_to_dn(object_id)]),
+ self.enabled_emulation_naming_attr]
+ if self.use_dumb_member:
+ attr_list[1][1].append(self.dumb_member)
+ conn.add_s(self.enabled_emulation_dn, attr_list)
+
+ def _remove_enabled(self, object_id):
+ modlist = [(ldap.MOD_DELETE,
+ 'member',
+ [self._id_to_dn(object_id)])]
+ with self.get_connection() as conn:
+ try:
+ conn.modify_s(self.enabled_emulation_dn, modlist)
+ except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE):
+ pass
+
+ def create(self, values):
+ if self.enabled_emulation:
+ enabled_value = values.pop('enabled', True)
+ ref = super(EnabledEmuMixIn, self).create(values)
+ if 'enabled' not in self.attribute_ignore:
+ if enabled_value:
+ self._add_enabled(ref['id'])
+ ref['enabled'] = enabled_value
+ return ref
+ else:
+ return super(EnabledEmuMixIn, self).create(values)
+
+ def get(self, object_id, ldap_filter=None):
+ ref = super(EnabledEmuMixIn, self).get(object_id, ldap_filter)
+ if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
+ ref['enabled'] = self._get_enabled(object_id)
+ return ref
+
+ def get_all(self, ldap_filter=None):
+ if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
+ # had to copy BaseLdap.get_all here to filter by DN
+ tenant_list = [self._ldap_res_to_model(x)
+ for x in self._ldap_get_all(ldap_filter)
+ if x[0] != self.enabled_emulation_dn]
+ for tenant_ref in tenant_list:
+ tenant_ref['enabled'] = self._get_enabled(tenant_ref['id'])
+ return tenant_list
+ else:
+ return super(EnabledEmuMixIn, self).get_all(ldap_filter)
+
+ def update(self, object_id, values, old_obj=None):
+ if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
+ data = values.copy()
+ enabled_value = data.pop('enabled', None)
+ ref = super(EnabledEmuMixIn, self).update(object_id, data, old_obj)
+ if enabled_value is not None:
+ if enabled_value:
+ self._add_enabled(object_id)
+ else:
+ self._remove_enabled(object_id)
+ ref['enabled'] = enabled_value
+ return ref
+ else:
+ return super(EnabledEmuMixIn, self).update(
+ object_id, values, old_obj)
+
+ def delete(self, object_id):
+ if self.enabled_emulation:
+ self._remove_enabled(object_id)
+ super(EnabledEmuMixIn, self).delete(object_id)
+
+
+class ProjectLdapStructureMixin(object):
+ """Project LDAP Structure shared between LDAP backends.
+
+ This is shared between the resource and assignment LDAP backends.
+
+ """
+ DEFAULT_OU = 'ou=Groups'
+ DEFAULT_STRUCTURAL_CLASSES = []
+ DEFAULT_OBJECTCLASS = 'groupOfNames'
+ DEFAULT_ID_ATTR = 'cn'
+ NotFound = exception.ProjectNotFound
+ notfound_arg = 'project_id' # NOTE(yorik-sar): while options_name = tenant
+ options_name = 'project'
+ attribute_options_names = {'name': 'name',
+ 'description': 'desc',
+ 'enabled': 'enabled',
+ 'domain_id': 'domain_id'}
+ immutable_attrs = ['name']
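
The hint-driven filtering in BaseLdap.filter_query() composes one LDAP filter term per comparator and ANDs the terms onto the base query. The following is a minimal standalone sketch of that composition, not part of the patch; the names are illustrative and a simplified stand-in is used instead of ldap.filter.escape_filter_chars.

def escape(value):
    # Minimal stand-in for ldap.filter.escape_filter_chars().
    for ch, repl in (('\\', r'\5c'), ('*', r'\2a'), ('(', r'\28'), (')', r'\29')):
        value = value.replace(ch, repl)
    return value

def build_term(attr, comparator, value):
    # Mirrors the comparator handling in filter_query(); unknown
    # comparators return None and are left for the controller.
    val = escape(value)
    if comparator == 'equals':
        return u'(%s=%s)' % (attr, val)
    elif comparator == 'contains':
        return u'(%s=*%s*)' % (attr, val)
    elif comparator == 'startswith':
        return u'(%s=%s*)' % (attr, val)
    elif comparator == 'endswith':
        return u'(%s=*%s)' % (attr, val)
    return None

base_query = u'(objectClass=groupOfNames)'
term = build_term('cn', 'startswith', 'proj')
print(u'(&%s%s)' % (base_query, term))  # (&(objectClass=groupOfNames)(cn=proj*))
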
diff --git a/keystone-moon/keystone/common/manager.py b/keystone-moon/keystone/common/manager.py
new file mode 100644
index 00000000..28bf2efb
--- /dev/null
+++ b/keystone-moon/keystone/common/manager.py
@@ -0,0 +1,76 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+
+from oslo_utils import importutils
+
+
+def response_truncated(f):
+ """Truncate the list returned by the wrapped function.
+
+ This is designed to wrap Manager list_{entity} methods to ensure that
+ any list limits that are defined are passed to the driver layer. If a
+ hints list is provided, the wrapper will insert the relevant limit into
+ the hints so that the underlying driver call can try to honor it. If the
+ driver does truncate the response, it will update the 'truncated' attribute
+ in the 'limit' entry in the hints list, which enables the caller of this
+ function to know if truncation has taken place. If, however, the driver
+ layer is unable to perform truncation, the 'limit' entry is simply left in
+ the hints list for the caller to handle.
+
+ A _get_list_limit() method is required to be present in the object class
+ hierarchy, which returns the limit for this backend to which we will
+ truncate.
+
+ If a hints list is not provided in the arguments of the wrapped call then
+ any limits set in the config file are ignored. This allows internal use
+ of such wrapped methods where the entire data set is needed as input for
+ the calculations of some other API (e.g. get role assignments for a given
+ project).
+
+ """
+ @functools.wraps(f)
+ def wrapper(self, *args, **kwargs):
+ if kwargs.get('hints') is None:
+ return f(self, *args, **kwargs)
+
+ list_limit = self.driver._get_list_limit()
+ if list_limit:
+ kwargs['hints'].set_limit(list_limit)
+ return f(self, *args, **kwargs)
+ return wrapper
+
+
+class Manager(object):
+ """Base class for intermediary request layer.
+
+ The Manager layer exists to support additional logic that applies to all
+ or some of the methods exposed by a service that are not specific to the
+ HTTP interface.
+
+ It also provides a stable entry point to dynamic backends.
+
+ An example of a probable use case is logging all the calls.
+
+ """
+
+ def __init__(self, driver_name):
+ self.driver = importutils.import_object(driver_name)
+
+ def __getattr__(self, name):
+ """Forward calls to the underlying driver."""
+ f = getattr(self.driver, name)
+ setattr(self, name, f)
+ return f
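
Manager.__getattr__ forwards an unknown attribute to the driver and caches the result on the instance, so later lookups bypass __getattr__ entirely. Below is a minimal sketch of the same pattern with a dummy driver; the names are illustrative, and unlike the real Manager no driver is loaded through importutils.

class DummyDriver(object):
    def list_users(self):
        return ['alice', 'bob']

class DummyManager(object):
    def __init__(self, driver):
        self.driver = driver

    def __getattr__(self, name):
        # Look up the attribute on the driver, then cache it on the
        # manager so subsequent access skips __getattr__.
        f = getattr(self.driver, name)
        setattr(self, name, f)
        return f

manager = DummyManager(DummyDriver())
print(manager.list_users())              # forwarded to the driver: ['alice', 'bob']
print('list_users' in manager.__dict__)  # True: now cached on the instance
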
diff --git a/keystone-moon/keystone/common/models.py b/keystone-moon/keystone/common/models.py
new file mode 100644
index 00000000..3b3aabe1
--- /dev/null
+++ b/keystone-moon/keystone/common/models.py
@@ -0,0 +1,182 @@
+# Copyright (C) 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Base model for keystone internal services
+
+Unless marked otherwise, all fields are strings.
+
+"""
+
+
+class Model(dict):
+ """Base model class."""
+ def __hash__(self):
+ return self['id'].__hash__()
+
+ @property
+ def known_keys(cls):
+ return cls.required_keys + cls.optional_keys
+
+
+class Token(Model):
+ """Token object.
+
+ Required keys:
+ id
+ expires (datetime)
+
+ Optional keys:
+ user
+ tenant
+ metadata
+ trust_id
+ """
+
+ required_keys = ('id', 'expires')
+ optional_keys = ('extra',)
+
+
+class Service(Model):
+ """Service object.
+
+ Required keys:
+ id
+ type
+ name
+
+ Optional keys:
+ """
+
+ required_keys = ('id', 'type', 'name')
+ optional_keys = tuple()
+
+
+class Endpoint(Model):
+ """Endpoint object
+
+ Required keys:
+ id
+ region
+ service_id
+
+ Optional keys:
+ internalurl
+ publicurl
+ adminurl
+ """
+
+ required_keys = ('id', 'region', 'service_id')
+ optional_keys = ('internalurl', 'publicurl', 'adminurl')
+
+
+class User(Model):
+ """User object.
+
+ Required keys:
+ id
+ name
+ domain_id
+
+ Optional keys:
+ password
+ description
+ email
+ enabled (bool, default True)
+ default_project_id
+ """
+
+ required_keys = ('id', 'name', 'domain_id')
+ optional_keys = ('password', 'description', 'email', 'enabled',
+ 'default_project_id')
+
+
+class Group(Model):
+ """Group object.
+
+ Required keys:
+ id
+ name
+ domain_id
+
+ Optional keys:
+
+ description
+
+ """
+
+ required_keys = ('id', 'name', 'domain_id')
+ optional_keys = ('description',)
+
+
+class Project(Model):
+ """Project object.
+
+ Required keys:
+ id
+ name
+ domain_id
+
+ Optional Keys:
+ description
+ enabled (bool, default True)
+
+ """
+
+ required_keys = ('id', 'name', 'domain_id')
+ optional_keys = ('description', 'enabled')
+
+
+class Role(Model):
+ """Role object.
+
+ Required keys:
+ id
+ name
+
+ """
+
+ required_keys = ('id', 'name')
+ optional_keys = tuple()
+
+
+class Trust(Model):
+ """Trust object.
+
+ Required keys:
+ id
+ trustor_user_id
+ trustee_user_id
+ project_id
+ """
+
+ required_keys = ('id', 'trustor_user_id', 'trustee_user_id', 'project_id')
+ optional_keys = ('expires_at',)
+
+
+class Domain(Model):
+ """Domain object.
+
+ Required keys:
+ id
+ name
+
+ Optional keys:
+
+ description
+ enabled (bool, default True)
+
+ """
+
+ required_keys = ('id', 'name')
+ optional_keys = ('description', 'enabled')
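
These models are plain dict subclasses: instances are built with keyword arguments and read like dictionaries, known_keys is simply required_keys plus optional_keys, and hashing is based solely on the 'id' field. A short usage sketch follows, assuming the keystone package is importable; the values are illustrative.

from keystone.common import models

user = models.User(id='u1', name='alice', domain_id='default', enabled=True)
print(user['name'])     # plain dict access: 'alice'
print(user.known_keys)  # required_keys + optional_keys for User
# Model.__hash__ uses only 'id', so two users with the same id hash equally.
print(hash(user) == hash(models.User(id='u1', name='other', domain_id='d2')))
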
diff --git a/keystone-moon/keystone/common/openssl.py b/keystone-moon/keystone/common/openssl.py
new file mode 100644
index 00000000..4eb7d1d1
--- /dev/null
+++ b/keystone-moon/keystone/common/openssl.py
@@ -0,0 +1,347 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import os
+
+from oslo_config import cfg
+from oslo_log import log
+
+from keystone.common import environment
+from keystone.common import utils
+from keystone.i18n import _LI, _LE
+
+LOG = log.getLogger(__name__)
+CONF = cfg.CONF
+
+PUBLIC_DIR_PERMS = 0o755 # -rwxr-xr-x
+PRIVATE_DIR_PERMS = 0o750 # -rwxr-x---
+PUBLIC_FILE_PERMS = 0o644 # -rw-r--r--
+PRIVATE_FILE_PERMS = 0o640 # -rw-r-----
+
+
+def file_exists(file_path):
+ return os.path.exists(file_path)
+
+
+class BaseCertificateConfigure(object):
+ """Create a certificate signing environment.
+
+ This is based on a config section and reasonable OpenSSL defaults.
+
+ """
+
+ def __init__(self, conf_obj, server_conf_obj, keystone_user,
+ keystone_group, rebuild, **kwargs):
+ self.conf_dir = os.path.dirname(server_conf_obj.ca_certs)
+ self.use_keystone_user = keystone_user
+ self.use_keystone_group = keystone_group
+ self.rebuild = rebuild
+ self.ssl_config_file_name = os.path.join(self.conf_dir, "openssl.conf")
+ self.request_file_name = os.path.join(self.conf_dir, "req.pem")
+ self.ssl_dictionary = {'conf_dir': self.conf_dir,
+ 'ca_cert': server_conf_obj.ca_certs,
+ 'default_md': 'default',
+ 'ssl_config': self.ssl_config_file_name,
+ 'ca_private_key': conf_obj.ca_key,
+ 'request_file': self.request_file_name,
+ 'signing_key': server_conf_obj.keyfile,
+ 'signing_cert': server_conf_obj.certfile,
+ 'key_size': int(conf_obj.key_size),
+ 'valid_days': int(conf_obj.valid_days),
+ 'cert_subject': conf_obj.cert_subject}
+
+ try:
+ # OpenSSL 1.0 and newer support default_md = default; older versions do not
+ openssl_ver = environment.subprocess.Popen(
+ ['openssl', 'version'],
+ stdout=environment.subprocess.PIPE).stdout.read()
+ if "OpenSSL 0." in openssl_ver:
+ self.ssl_dictionary['default_md'] = 'sha1'
+ except OSError:
+ LOG.warn('Failed to invoke ``openssl version``, '
+ 'assuming it is v1.0 or newer')
+ self.ssl_dictionary.update(kwargs)
+
+ def exec_command(self, command):
+ to_exec = []
+ for cmd_part in command:
+ to_exec.append(cmd_part % self.ssl_dictionary)
+ LOG.info(_LI('Running command - %s'), ' '.join(to_exec))
+ # NOTE(Jeffrey4l): Redirect both stdout and stderr to pipe, so the
+ # output can be captured.
+ # NOTE(Jeffrey4l): check_output is not compatible with Python 2.6.
+ # So use Popen instead.
+ process = environment.subprocess.Popen(
+ to_exec,
+ stdout=environment.subprocess.PIPE,
+ stderr=environment.subprocess.STDOUT)
+ output = process.communicate()[0]
+ retcode = process.poll()
+ if retcode:
+ LOG.error(_LE('Command %(to_exec)s exited with %(retcode)s '
+ '- %(output)s'),
+ {'to_exec': to_exec,
+ 'retcode': retcode,
+ 'output': output})
+ e = environment.subprocess.CalledProcessError(retcode, to_exec[0])
+ # NOTE(Jeffrey4l): Python 2.6 compatibility:
+ # CalledProcessError did not have output keyword argument
+ e.output = output
+ raise e
+
+ def clean_up_existing_files(self):
+ files_to_clean = [self.ssl_dictionary['ca_private_key'],
+ self.ssl_dictionary['ca_cert'],
+ self.ssl_dictionary['signing_key'],
+ self.ssl_dictionary['signing_cert'],
+ ]
+
+ existing_files = []
+
+ for file_path in files_to_clean:
+ if file_exists(file_path):
+ if self.rebuild:
+ # The file exists but the user wants to rebuild it, so blow
+ # it away
+ try:
+ os.remove(file_path)
+ except OSError as exc:
+ LOG.error(_LE('Failed to remove file %(file_path)r: '
+ '%(error)s'),
+ {'file_path': file_path,
+ 'error': exc.strerror})
+ raise
+ else:
+ existing_files.append(file_path)
+
+ return existing_files
+
+ def build_ssl_config_file(self):
+ utils.make_dirs(os.path.dirname(self.ssl_config_file_name),
+ mode=PUBLIC_DIR_PERMS,
+ user=self.use_keystone_user,
+ group=self.use_keystone_group, log=LOG)
+ if not file_exists(self.ssl_config_file_name):
+ ssl_config_file = open(self.ssl_config_file_name, 'w')
+ ssl_config_file.write(self.sslconfig % self.ssl_dictionary)
+ ssl_config_file.close()
+ utils.set_permissions(self.ssl_config_file_name,
+ mode=PRIVATE_FILE_PERMS,
+ user=self.use_keystone_user,
+ group=self.use_keystone_group, log=LOG)
+
+ index_file_name = os.path.join(self.conf_dir, 'index.txt')
+ if not file_exists(index_file_name):
+ index_file = open(index_file_name, 'w')
+ index_file.write('')
+ index_file.close()
+ utils.set_permissions(index_file_name,
+ mode=PRIVATE_FILE_PERMS,
+ user=self.use_keystone_user,
+ group=self.use_keystone_group, log=LOG)
+
+ serial_file_name = os.path.join(self.conf_dir, 'serial')
+ if not file_exists(serial_file_name):
+ index_file = open(serial_file_name, 'w')
+ index_file.write('01')
+ index_file.close()
+ utils.set_permissions(serial_file_name,
+ mode=PRIVATE_FILE_PERMS,
+ user=self.use_keystone_user,
+ group=self.use_keystone_group, log=LOG)
+
+ def build_ca_cert(self):
+ ca_key_file = self.ssl_dictionary['ca_private_key']
+ utils.make_dirs(os.path.dirname(ca_key_file),
+ mode=PRIVATE_DIR_PERMS,
+ user=self.use_keystone_user,
+ group=self.use_keystone_group, log=LOG)
+ if not file_exists(ca_key_file):
+ self.exec_command(['openssl', 'genrsa',
+ '-out', '%(ca_private_key)s',
+ '%(key_size)d'])
+ utils.set_permissions(ca_key_file,
+ mode=PRIVATE_FILE_PERMS,
+ user=self.use_keystone_user,
+ group=self.use_keystone_group, log=LOG)
+
+ ca_cert = self.ssl_dictionary['ca_cert']
+ utils.make_dirs(os.path.dirname(ca_cert),
+ mode=PUBLIC_DIR_PERMS,
+ user=self.use_keystone_user,
+ group=self.use_keystone_group, log=LOG)
+ if not file_exists(ca_cert):
+ self.exec_command(['openssl', 'req', '-new', '-x509',
+ '-extensions', 'v3_ca',
+ '-key', '%(ca_private_key)s',
+ '-out', '%(ca_cert)s',
+ '-days', '%(valid_days)d',
+ '-config', '%(ssl_config)s',
+ '-subj', '%(cert_subject)s'])
+ utils.set_permissions(ca_cert,
+ mode=PUBLIC_FILE_PERMS,
+ user=self.use_keystone_user,
+ group=self.use_keystone_group, log=LOG)
+
+ def build_private_key(self):
+ signing_keyfile = self.ssl_dictionary['signing_key']
+ utils.make_dirs(os.path.dirname(signing_keyfile),
+ mode=PRIVATE_DIR_PERMS,
+ user=self.use_keystone_user,
+ group=self.use_keystone_group, log=LOG)
+ if not file_exists(signing_keyfile):
+ self.exec_command(['openssl', 'genrsa', '-out', '%(signing_key)s',
+ '%(key_size)d'])
+ utils.set_permissions(signing_keyfile,
+ mode=PRIVATE_FILE_PERMS,
+ user=self.use_keystone_user,
+ group=self.use_keystone_group, log=LOG)
+
+ def build_signing_cert(self):
+ signing_cert = self.ssl_dictionary['signing_cert']
+
+ utils.make_dirs(os.path.dirname(signing_cert),
+ mode=PUBLIC_DIR_PERMS,
+ user=self.use_keystone_user,
+ group=self.use_keystone_group, log=LOG)
+ if not file_exists(signing_cert):
+ self.exec_command(['openssl', 'req', '-key', '%(signing_key)s',
+ '-new', '-out', '%(request_file)s',
+ '-config', '%(ssl_config)s',
+ '-subj', '%(cert_subject)s'])
+
+ self.exec_command(['openssl', 'ca', '-batch',
+ '-out', '%(signing_cert)s',
+ '-config', '%(ssl_config)s',
+ '-days', '%(valid_days)d',
+ '-cert', '%(ca_cert)s',
+ '-keyfile', '%(ca_private_key)s',
+ '-infiles', '%(request_file)s'])
+
+ def run(self):
+ try:
+ existing_files = self.clean_up_existing_files()
+ except OSError:
+ print('An error occurred when rebuilding cert files.')
+ return
+ if existing_files:
+ print('The following cert files already exist, use --rebuild to '
+ 'remove the existing files before regenerating:')
+ for f in existing_files:
+ print('%s already exists' % f)
+ return
+
+ self.build_ssl_config_file()
+ self.build_ca_cert()
+ self.build_private_key()
+ self.build_signing_cert()
+
+
+class ConfigurePKI(BaseCertificateConfigure):
+ """Generate files for PKI signing using OpenSSL.
+
+ Signed tokens require a private key and signing certificate which itself
+ must be signed by a CA. This class generates them with workable defaults
+ if the files are not already present.
+
+ """
+
+ def __init__(self, keystone_user, keystone_group, rebuild=False):
+ super(ConfigurePKI, self).__init__(CONF.signing, CONF.signing,
+ keystone_user, keystone_group,
+ rebuild=rebuild)
+
+
+class ConfigureSSL(BaseCertificateConfigure):
+ """Generate files for HTTPS using OpenSSL.
+
+ Creates a public/private key pair and certificates. If a CA is not given,
+ one will be generated using the provided arguments.
+ """
+
+ def __init__(self, keystone_user, keystone_group, rebuild=False):
+ super(ConfigureSSL, self).__init__(CONF.ssl, CONF.eventlet_server_ssl,
+ keystone_user, keystone_group,
+ rebuild=rebuild)
+
+
+BaseCertificateConfigure.sslconfig = """
+# OpenSSL configuration file.
+#
+
+# Establish working directory.
+
+dir = %(conf_dir)s
+
+[ ca ]
+default_ca = CA_default
+
+[ CA_default ]
+new_certs_dir = $dir
+serial = $dir/serial
+database = $dir/index.txt
+default_days = 365
+default_md = %(default_md)s
+preserve = no
+email_in_dn = no
+nameopt = default_ca
+certopt = default_ca
+policy = policy_anything
+x509_extensions = usr_cert
+unique_subject = no
+
+[ policy_anything ]
+countryName = optional
+stateOrProvinceName = optional
+organizationName = optional
+organizationalUnitName = optional
+commonName = supplied
+emailAddress = optional
+
+[ req ]
+default_bits = 2048 # Size of keys
+default_keyfile = key.pem # name of generated keys
+string_mask = utf8only # permitted characters
+distinguished_name = req_distinguished_name
+req_extensions = v3_req
+x509_extensions = v3_ca
+
+[ req_distinguished_name ]
+countryName = Country Name (2 letter code)
+countryName_min = 2
+countryName_max = 2
+stateOrProvinceName = State or Province Name (full name)
+localityName = Locality Name (city, district)
+0.organizationName = Organization Name (company)
+organizationalUnitName = Organizational Unit Name (department, division)
+commonName = Common Name (hostname, IP, or your name)
+commonName_max = 64
+emailAddress = Email Address
+emailAddress_max = 64
+
+[ v3_ca ]
+basicConstraints = CA:TRUE
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid:always,issuer
+
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+
+[ usr_cert ]
+basicConstraints = CA:FALSE
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid:always
+"""
diff --git a/keystone-moon/keystone/common/pemutils.py b/keystone-moon/keystone/common/pemutils.py
new file mode 100755
index 00000000..ddbe05cf
--- /dev/null
+++ b/keystone-moon/keystone/common/pemutils.py
@@ -0,0 +1,509 @@
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+"""
+PEM formatted data is used frequently in conjunction with X509 PKI as
+a data exchange mechanism for binary data. The acronym PEM stands for
+Privacy Enhanced Mail as defined in RFC-1421. Contrary to expectation
+the PEM format in common use has little to do with RFC-1421. Instead
+what we know as PEM format grew out of the need for a data exchange
+mechanism largely by the influence of OpenSSL. Other X509
+implementations have adopted it.
+
+Unfortunately the PEM format has never been officially standardized. Its
+basic format is as follows:
+
+1) A header consisting of 5 hyphens followed by the word BEGIN and a
+single space. Then an upper case string describing the contents of the
+PEM block, this is followed by 5 hyphens and a newline.
+
+2) Binary data (typically in DER ASN.1 format) encoded in base64. The
+base64 text is line wrapped so that each line of base64 is 64
+characters long and terminated with a newline. The last line of base64
+text may be less than 64 characters. The content and format of the
+binary data is entirely dependent upon the type of data announced in
+the header and footer.
+
+3) A footer in exactly the same format as the header, except that the word
+BEGIN is replaced by END. The content name in the header and footer should
+match exactly.
+
+The above is called a PEM block. It is permissible for multiple PEM
+blocks to appear in a single file or block of text. This is often used
+when specifying multiple X509 certificates.
+
+An example PEM block for a certificate is:
+
+-----BEGIN CERTIFICATE-----
+MIIC0TCCAjqgAwIBAgIJANsHKV73HYOwMA0GCSqGSIb3DQEBBQUAMIGeMQowCAYD
+VQQFEwE1MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVN1bm55
+dmFsZTESMBAGA1UEChMJT3BlblN0YWNrMREwDwYDVQQLEwhLZXlzdG9uZTElMCMG
+CSqGSIb3DQEJARYWa2V5c3RvbmVAb3BlbnN0YWNrLm9yZzEUMBIGA1UEAxMLU2Vs
+ZiBTaWduZWQwIBcNMTIxMTA1MTgxODI0WhgPMjA3MTA0MzAxODE4MjRaMIGeMQow
+CAYDVQQFEwE1MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVN1
+bm55dmFsZTESMBAGA1UEChMJT3BlblN0YWNrMREwDwYDVQQLEwhLZXlzdG9uZTEl
+MCMGCSqGSIb3DQEJARYWa2V5c3RvbmVAb3BlbnN0YWNrLm9yZzEUMBIGA1UEAxML
+U2VsZiBTaWduZWQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALzI17ExCaqd
+r7xY2Q5CBZ1bW1lsrXxS8eNJRdQtskDuQVAluY03/OGZd8HQYiiY/ci2tYy7BNIC
+bh5GaO95eqTDykJR3liOYE/tHbY6puQlj2ZivmhlSd2d5d7lF0/H28RQsLu9VktM
+uw6q9DpDm35jfrr8LgSeA3MdVqcS/4OhAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMB
+Af8wDQYJKoZIhvcNAQEFBQADgYEAjSQND7i1dNZtLKpWgX+JqMr3BdVlM15mFeVr
+C26ZspZjZVY5okdozO9gU3xcwRe4Cg30sKFOe6EBQKpkTZucFOXwBtD3h6dWJrdD
+c+m/CL/rs0GatDavbaIT2vv405SQUQooCdVh72LYel+4/a6xmRd7fQx3iEXN9QYj
+vmHJUcA=
+-----END CERTIFICATE-----
+
+PEM format is safe for transmission in 7-bit ASCII systems
+(i.e. standard email). Since 7-bit ASCII is a proper subset of UTF-8
+and Latin-1 it is not affected by transcoding between those
+charsets. Nor is PEM format affected by the choice of line
+endings. This makes PEM format particularly attractive for transport
+and storage of binary data.
+
+This module provides a number of utilities supporting the generation
+and consumption of PEM formatted data including:
+
+ * parse text and find all PEM blocks contained in the
+ text. Information on the location of the block in the text, the
+ type of PEM block, and its base64 and binary data contents.
+
+ * parse text assumed to contain PEM data and return the binary
+ data.
+
+ * test if a block of text is a PEM block
+
+ * convert base64 text into a formatted PEM block
+
+ * convert binary data into a formatted PEM block
+
+ * access to the valid PEM types and their headers
+
+"""
+
+import base64
+import re
+
+import six
+
+from keystone.common import base64utils
+from keystone.i18n import _
+
+
+PEM_TYPE_TO_HEADER = {
+ u'cms': u'CMS',
+ u'dsa-private': u'DSA PRIVATE KEY',
+ u'dsa-public': u'DSA PUBLIC KEY',
+ u'ecdsa-public': u'ECDSA PUBLIC KEY',
+ u'ec-private': u'EC PRIVATE KEY',
+ u'pkcs7': u'PKCS7',
+ u'pkcs7-signed': u'PKCS #7 SIGNED DATA',
+ u'pkcs8': u'ENCRYPTED PRIVATE KEY',
+ u'private-key': u'PRIVATE KEY',
+ u'public-key': u'PUBLIC KEY',
+ u'rsa-private': u'RSA PRIVATE KEY',
+ u'rsa-public': u'RSA PUBLIC KEY',
+ u'cert': u'CERTIFICATE',
+ u'crl': u'X509 CRL',
+ u'cert-pair': u'CERTIFICATE PAIR',
+ u'csr': u'CERTIFICATE REQUEST',
+}
+
+# This is not a 1-to-1 reverse map of PEM_TYPE_TO_HEADER
+# because it includes deprecated headers that map to 1 pem_type.
+PEM_HEADER_TO_TYPE = {
+ u'CMS': u'cms',
+ u'DSA PRIVATE KEY': u'dsa-private',
+ u'DSA PUBLIC KEY': u'dsa-public',
+ u'ECDSA PUBLIC KEY': u'ecdsa-public',
+ u'EC PRIVATE KEY': u'ec-private',
+ u'PKCS7': u'pkcs7',
+ u'PKCS #7 SIGNED DATA': u'pkcs7-signed',
+ u'ENCRYPTED PRIVATE KEY': u'pkcs8',
+ u'PRIVATE KEY': u'private-key',
+ u'PUBLIC KEY': u'public-key',
+ u'RSA PRIVATE KEY': u'rsa-private',
+ u'RSA PUBLIC KEY': u'rsa-public',
+ u'CERTIFICATE': u'cert',
+ u'X509 CERTIFICATE': u'cert',
+ u'CERTIFICATE PAIR': u'cert-pair',
+ u'X509 CRL': u'crl',
+ u'CERTIFICATE REQUEST': u'csr',
+ u'NEW CERTIFICATE REQUEST': u'csr',
+}
+
+# List of valid pem_types
+pem_types = sorted(PEM_TYPE_TO_HEADER.keys())
+
+# List of valid pem_headers
+pem_headers = sorted(PEM_TYPE_TO_HEADER.values())
+
+_pem_begin_re = re.compile(r'^-{5}BEGIN\s+([^-]+)-{5}\s*$', re.MULTILINE)
+_pem_end_re = re.compile(r'^-{5}END\s+([^-]+)-{5}\s*$', re.MULTILINE)
+
+
+class PEMParseResult(object):
+ """Information returned when a PEM block is found in text.
+
+ PEMParseResult contains information about a PEM block discovered
+ while parsing text. The following properties are defined:
+
+ pem_type
+ A short hand name for the type of the PEM data, e.g. cert,
+ csr, crl, cms, key. Valid pem_types are listed in pem_types.
+ When the pem_type is set the pem_header is updated to match it.
+
+ pem_header
+ The text following '-----BEGIN ' in the PEM header.
+ Common examples are:
+
+ -----BEGIN CERTIFICATE-----
+ -----BEGIN CMS-----
+
+ Thus the pem_header would be CERTIFICATE and CMS respectively.
+ When the pem_header is set the pem_type is updated to match it.
+
+ pem_start, pem_end
+ The beginning and ending positions of the PEM block
+ including the PEM header and footer.
+
+ base64_start, base64_end
+ The beginning and ending positions of the base64 data
+ contained inside the PEM header and footer. Includes trailing
+ new line
+
+ binary_data
+ The decoded base64 data. None if not decoded.
+
+ """
+
+ def __init__(self, pem_type=None, pem_header=None,
+ pem_start=None, pem_end=None,
+ base64_start=None, base64_end=None,
+ binary_data=None):
+
+ self._pem_type = None
+ self._pem_header = None
+
+ if pem_type is not None:
+ self.pem_type = pem_type
+
+ if pem_header is not None:
+ self.pem_header = pem_header
+
+ self.pem_start = pem_start
+ self.pem_end = pem_end
+ self.base64_start = base64_start
+ self.base64_end = base64_end
+ self.binary_data = binary_data
+
+ @property
+ def pem_type(self):
+ return self._pem_type
+
+ @pem_type.setter
+ def pem_type(self, pem_type):
+ if pem_type is None:
+ self._pem_type = None
+ self._pem_header = None
+ else:
+ pem_header = PEM_TYPE_TO_HEADER.get(pem_type)
+ if pem_header is None:
+ raise ValueError(_('unknown pem_type "%(pem_type)s", '
+ 'valid types are: %(valid_pem_types)s') %
+ {'pem_type': pem_type,
+ 'valid_pem_types': ', '.join(pem_types)})
+ self._pem_type = pem_type
+ self._pem_header = pem_header
+
+ @property
+ def pem_header(self):
+ return self._pem_header
+
+ @pem_header.setter
+ def pem_header(self, pem_header):
+ if pem_header is None:
+ self._pem_type = None
+ self._pem_header = None
+ else:
+ pem_type = PEM_HEADER_TO_TYPE.get(pem_header)
+ if pem_type is None:
+ raise ValueError(_('unknown pem header "%(pem_header)s", '
+ 'valid headers are: '
+ '%(valid_pem_headers)s') %
+ {'pem_header': pem_header,
+ 'valid_pem_headers':
+ ', '.join("'%s'" %
+ [x for x in pem_headers])})
+
+ self._pem_type = pem_type
+ self._pem_header = pem_header
+
+
+def pem_search(text, start=0):
+ """Search for a block of PEM formatted data
+
+ Search for a PEM block in a text string. The search begins at
+ start. If a PEM block is found a PEMParseResult object is
+ returned, otherwise if no PEM block is found None is returned.
+
+ If the pem_type is not the same in both the header and footer
+ a ValueError is raised.
+
+ The start and end positions are suitable for use as slices into
+ the text. To search for multiple PEM blocks pass pem_end as the
+ start position for the next iteration. Terminate the iteration
+ when None is returned. Example::
+
+ start = 0
+ while True:
+ block = pem_search(text, start)
+ if block is None:
+ break
+ base64_data = text[block.base64_start : block.base64_end]
+ start = block.pem_end
+
+ :param text: the text to search for PEM blocks
+ :type text: string
+ :param start: the position in text to start searching from (default: 0)
+ :type start: int
+ :returns: PEMParseResult or None if not found
+ :raises: ValueError
+ """
+
+ match = _pem_begin_re.search(text, pos=start)
+ if match:
+ pem_start = match.start()
+ begin_text = match.group(0)
+ base64_start = min(len(text), match.end() + 1)
+ begin_pem_header = match.group(1).strip()
+
+ match = _pem_end_re.search(text, pos=base64_start)
+ if match:
+ pem_end = min(len(text), match.end() + 1)
+ base64_end = match.start()
+ end_pem_header = match.group(1).strip()
+ else:
+ raise ValueError(_('failed to find end matching "%s"') %
+ begin_text)
+
+ if begin_pem_header != end_pem_header:
+ raise ValueError(_('beginning & end PEM headers do not match '
+ '(%(begin_pem_header)s'
+ '!= '
+ '%(end_pem_header)s)') %
+ {'begin_pem_header': begin_pem_header,
+ 'end_pem_header': end_pem_header})
+ else:
+ return None
+
+ result = PEMParseResult(pem_header=begin_pem_header,
+ pem_start=pem_start, pem_end=pem_end,
+ base64_start=base64_start, base64_end=base64_end)
+
+ return result
+
+
+def parse_pem(text, pem_type=None, max_items=None):
+ """Scan text for PEM data, return list of PEM items
+
+ The input text is scanned for PEM blocks, for each one found a
+ PEMParseResult is constructed and added to the return list.
+
+ pem_type operates as a filter on the type of PEM desired. If
+ pem_type is specified only those PEM blocks which match will be
+ included. The pem_type is a logical name, not the actual text in
+ the pem header (e.g. 'cert'). If the pem_type is None all PEM
+ blocks are returned.
+
+ If max_items is specified the result is limited to that number of
+ items.
+
+ The return value is a list of PEMParseResult objects. The
+ PEMParseResult provides complete information about the PEM block
+ including the decoded binary data for the PEM block. The list is
+ ordered in the same order as found in the text.
+
+ Examples::
+
+ # Get all certs
+ certs = parse_pem(text, 'cert')
+
+ # Get the first cert
+ try:
+ binary_cert = parse_pem(text, 'cert', 1)[0].binary_data
+ except IndexError:
+ raise ValueError('no cert found')
+
+ :param text: The text to search for PEM blocks
+ :type text: string
+ :param pem_type: Only return data for this pem_type.
+ Valid types are: csr, cert, crl, cms, key.
+ If pem_type is None no filtering is performed.
+ (default: None)
+ :type pem_type: string or None
+ :param max_items: Limit the number of blocks returned. (default: None)
+ :type max_items: int or None
+ :return: List of PEMParseResult, one for each PEM block found
+ :raises: ValueError, InvalidBase64Error
+ """
+
+ pem_blocks = []
+ start = 0
+
+ while True:
+ block = pem_search(text, start)
+ if block is None:
+ break
+ start = block.pem_end
+ if pem_type is None:
+ pem_blocks.append(block)
+ else:
+ try:
+ if block.pem_type == pem_type:
+ pem_blocks.append(block)
+ except KeyError:
+ raise ValueError(_('unknown pem_type: "%s"') % (pem_type))
+
+ if max_items is not None and len(pem_blocks) >= max_items:
+ break
+
+ for block in pem_blocks:
+ base64_data = text[block.base64_start:block.base64_end]
+ try:
+ binary_data = base64.b64decode(base64_data)
+ except Exception as e:
+ block.binary_data = None
+ raise base64utils.InvalidBase64Error(
+ _('failed to base64 decode %(pem_type)s PEM at position '
+ '%(position)d: %(err_msg)s') %
+ {'pem_type': block.pem_type,
+ 'position': block.pem_start,
+ 'err_msg': six.text_type(e)})
+ else:
+ block.binary_data = binary_data
+
+ return pem_blocks
+
+
+def get_pem_data(text, pem_type='cert'):
+ """Scan text for PEM data, return binary contents
+
+ The input text is scanned for a PEM block which matches the pem_type.
+ If found the binary data contained in the PEM block is returned.
+ If no PEM block is found or it does not match the specified pem type
+ None is returned.
+
+ :param text: The text to search for the PEM block
+ :type text: string
+ :param pem_type: Only return data for this pem_type.
+ Valid types are: csr, cert, crl, cms, key.
+ (default: 'cert')
+ :type pem_type: string
+ :return: binary data or None if not found.
+ """
+
+ blocks = parse_pem(text, pem_type, 1)
+ if not blocks:
+ return None
+ return blocks[0].binary_data
+
+
+def is_pem(text, pem_type='cert'):
+ """Does this text contain a PEM block.
+
+ Check for the existence of a PEM formatted block in the
+ text; if one is found, verify that its contents can be base64
+ decoded, and if so return True. Return False otherwise.
+
+ :param text: The text to search for PEM blocks
+ :type text: string
+ :param pem_type: Only return data for this pem_type.
+ Valid types are: csr, cert, crl, cms, key.
+ (default: 'cert')
+ :type pem_type: string
+ :returns: bool -- True if text contains PEM matching the pem_type,
+ False otherwise.
+ """
+
+ try:
+ pem_blocks = parse_pem(text, pem_type, max_items=1)
+ except base64utils.InvalidBase64Error:
+ return False
+
+ if pem_blocks:
+ return True
+ else:
+ return False
+
+
+def base64_to_pem(base64_text, pem_type='cert'):
+ """Format string of base64 text into PEM format
+
+ Input is assumed to consist only of members of the base64 alphabet
+ (i.e. no whitespace). Use one of the filter functions from
+ base64utils to ensure the input is clean
+ (e.g. strip_whitespace()).
+
+ :param base64_text: text containing ONLY base64 alphabet
+ characters to be inserted into PEM output.
+ :type base64_text: string
+ :param pem_type: Produce a PEM block for this type.
+ Valid types are: csr, cert, crl, cms, key.
+ (default: 'cert')
+ :type pem_type: string
+ :returns: string -- PEM formatted text
+
+
+ """
+ pem_header = PEM_TYPE_TO_HEADER[pem_type]
+ buf = six.StringIO()
+
+ buf.write(u'-----BEGIN %s-----' % pem_header)
+ buf.write(u'\n')
+
+ for line in base64utils.base64_wrap_iter(base64_text, width=64):
+ buf.write(line)
+ buf.write(u'\n')
+
+ buf.write(u'-----END %s-----' % pem_header)
+ buf.write(u'\n')
+
+ text = buf.getvalue()
+ buf.close()
+ return text
+
+
+def binary_to_pem(binary_data, pem_type='cert'):
+ """Format binary data into PEM format
+
+ Example:
+
+ # get the certificate binary data in DER format
+ der_data = certificate.der
+ # convert the DER binary data into a PEM
+ pem = binary_to_pem(der_data, 'cert')
+
+
+ :param binary_data: binary data to encapsulate into PEM
+ :type binary_data: buffer
+ :param pem_type: Produce a PEM block for this type.
+ Valid types are: csr, cert, crl, cms, key.
+ (default: 'cert')
+ :type pem_type: string
+ :returns: string -- PEM formatted text
+
+ """
+ base64_text = base64.b64encode(binary_data)
+ return base64_to_pem(base64_text, pem_type)
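
A minimal usage sketch for the helpers above (it assumes this module is importable as keystone.common.pemutils and that cert_der already holds DER-encoded certificate bytes; both names are illustrative, not part of the patch):

    from keystone.common import pemutils

    # Wrap raw DER bytes in a PEM envelope, then recover them again.
    pem_text = pemutils.binary_to_pem(cert_der, 'cert')
    assert pemutils.is_pem(pem_text, 'cert')
    assert pemutils.get_pem_data(pem_text, 'cert') == cert_der

    # parse_pem() yields one PEMParseResult per block, in document order.
    for block in pemutils.parse_pem(pem_text):
        print(block.pem_type, block.pem_start, block.pem_end)
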
diff --git a/keystone-moon/keystone/common/router.py b/keystone-moon/keystone/common/router.py
new file mode 100644
index 00000000..ce4e834d
--- /dev/null
+++ b/keystone-moon/keystone/common/router.py
@@ -0,0 +1,80 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import json_home
+from keystone.common import wsgi
+
+
+class Router(wsgi.ComposableRouter):
+ def __init__(self, controller, collection_key, key,
+ resource_descriptions=None,
+ is_entity_implemented=True):
+ self.controller = controller
+ self.key = key
+ self.collection_key = collection_key
+ self._resource_descriptions = resource_descriptions
+ self._is_entity_implemented = is_entity_implemented
+
+ def add_routes(self, mapper):
+ collection_path = '/%(collection_key)s' % {
+ 'collection_key': self.collection_key}
+ entity_path = '/%(collection_key)s/{%(key)s_id}' % {
+ 'collection_key': self.collection_key,
+ 'key': self.key}
+
+ mapper.connect(
+ collection_path,
+ controller=self.controller,
+ action='create_%s' % self.key,
+ conditions=dict(method=['POST']))
+ mapper.connect(
+ collection_path,
+ controller=self.controller,
+ action='list_%s' % self.collection_key,
+ conditions=dict(method=['GET']))
+ mapper.connect(
+ entity_path,
+ controller=self.controller,
+ action='get_%s' % self.key,
+ conditions=dict(method=['GET']))
+ mapper.connect(
+ entity_path,
+ controller=self.controller,
+ action='update_%s' % self.key,
+ conditions=dict(method=['PATCH']))
+ mapper.connect(
+ entity_path,
+ controller=self.controller,
+ action='delete_%s' % self.key,
+ conditions=dict(method=['DELETE']))
+
+ # Add the collection resource and entity resource to the resource
+ # descriptions.
+
+ collection_rel = json_home.build_v3_resource_relation(
+ self.collection_key)
+ rel_data = {'href': collection_path, }
+ self._resource_descriptions.append((collection_rel, rel_data))
+
+ if self._is_entity_implemented:
+ entity_rel = json_home.build_v3_resource_relation(self.key)
+ id_str = '%s_id' % self.key
+ id_param_rel = json_home.build_v3_parameter_relation(id_str)
+ entity_rel_data = {
+ 'href-template': entity_path,
+ 'href-vars': {
+ id_str: id_param_rel,
+ },
+ }
+ self._resource_descriptions.append((entity_rel, entity_rel_data))
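
The Router above derives five CRUD routes from a key / collection_key pair. A small sketch of the resulting mapping, using illustrative 'user'/'users' names (the real class wires these paths into a routes mapper via wsgi.ComposableRouter; this only reproduces the path and action templates):

    key, collection_key = 'user', 'users'   # illustrative resource names
    collection_path = '/%s' % collection_key
    entity_path = '/%s/{%s_id}' % (collection_key, key)

    routes = [
        ('POST',   collection_path, 'create_%s' % key),
        ('GET',    collection_path, 'list_%s' % collection_key),
        ('GET',    entity_path,     'get_%s' % key),
        ('PATCH',  entity_path,     'update_%s' % key),
        ('DELETE', entity_path,     'delete_%s' % key),
    ]
    for method, path, action in routes:
        # e.g. "POST   /users            -> create_user"
        print('%-6s %-18s -> %s' % (method, path, action))
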
diff --git a/keystone-moon/keystone/common/sql/__init__.py b/keystone-moon/keystone/common/sql/__init__.py
new file mode 100644
index 00000000..84e0fb83
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common.sql.core import * # noqa
diff --git a/keystone-moon/keystone/common/sql/core.py b/keystone-moon/keystone/common/sql/core.py
new file mode 100644
index 00000000..bf168701
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/core.py
@@ -0,0 +1,431 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""SQL backends for the various services.
+
+Before using this module, call initialize(). This has to be done before
+CONF() because it sets up configuration options.
+
+"""
+import contextlib
+import functools
+
+from oslo_config import cfg
+from oslo_db import exception as db_exception
+from oslo_db import options as db_options
+from oslo_db.sqlalchemy import models
+from oslo_db.sqlalchemy import session as db_session
+from oslo_log import log
+from oslo_serialization import jsonutils
+import six
+import sqlalchemy as sql
+from sqlalchemy.ext import declarative
+from sqlalchemy.orm.attributes import flag_modified, InstrumentedAttribute
+from sqlalchemy import types as sql_types
+
+from keystone.common import utils
+from keystone import exception
+from keystone.i18n import _
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+ModelBase = declarative.declarative_base()
+
+
+# For exporting to other modules
+Column = sql.Column
+Index = sql.Index
+String = sql.String
+Integer = sql.Integer
+Enum = sql.Enum
+ForeignKey = sql.ForeignKey
+DateTime = sql.DateTime
+IntegrityError = sql.exc.IntegrityError
+DBDuplicateEntry = db_exception.DBDuplicateEntry
+OperationalError = sql.exc.OperationalError
+NotFound = sql.orm.exc.NoResultFound
+Boolean = sql.Boolean
+Text = sql.Text
+UniqueConstraint = sql.UniqueConstraint
+PrimaryKeyConstraint = sql.PrimaryKeyConstraint
+joinedload = sql.orm.joinedload
+# Suppress flake8's unused import warning for flag_modified:
+flag_modified = flag_modified
+
+
+def initialize():
+ """Initialize the module."""
+
+ db_options.set_defaults(
+ CONF,
+ connection="sqlite:///keystone.db")
+
+
+def initialize_decorator(init):
+ """Ensure that the length of string field do not exceed the limit.
+
+ This decorator check the initialize arguments, to make sure the
+ length of string field do not exceed the length limit, or raise a
+ 'StringLengthExceeded' exception.
+
+ Use decorator instead of inheritance, because the metaclass will
+ check the __tablename__, primary key columns, etc. at the class
+ definition.
+
+ """
+ def initialize(self, *args, **kwargs):
+ cls = type(self)
+ for k, v in kwargs.items():
+ if hasattr(cls, k):
+ attr = getattr(cls, k)
+ if isinstance(attr, InstrumentedAttribute):
+ column = attr.property.columns[0]
+ if isinstance(column.type, String):
+ if not isinstance(v, six.text_type):
+ v = six.text_type(v)
+ if column.type.length and column.type.length < len(v):
+ raise exception.StringLengthExceeded(
+ string=v, type=k, length=column.type.length)
+
+ init(self, *args, **kwargs)
+ return initialize
+
+ModelBase.__init__ = initialize_decorator(ModelBase.__init__)
+
+
+# Special Fields
+class JsonBlob(sql_types.TypeDecorator):
+
+ impl = sql.Text
+
+ def process_bind_param(self, value, dialect):
+ return jsonutils.dumps(value)
+
+ def process_result_value(self, value, dialect):
+ return jsonutils.loads(value)
+
+
+class DictBase(models.ModelBase):
+ attributes = []
+
+ @classmethod
+ def from_dict(cls, d):
+ new_d = d.copy()
+
+ new_d['extra'] = {k: new_d.pop(k) for k in six.iterkeys(d)
+ if k not in cls.attributes and k != 'extra'}
+
+ return cls(**new_d)
+
+ def to_dict(self, include_extra_dict=False):
+ """Returns the model's attributes as a dictionary.
+
+ If include_extra_dict is True, 'extra' attributes are literally
+ included in the resulting dictionary twice, for backwards-compatibility
+ with a broken implementation.
+
+ """
+ d = self.extra.copy()
+ for attr in self.__class__.attributes:
+ d[attr] = getattr(self, attr)
+
+ if include_extra_dict:
+ d['extra'] = self.extra.copy()
+
+ return d
+
+ def __getitem__(self, key):
+ if key in self.extra:
+ return self.extra[key]
+ return getattr(self, key)
+
+
+class ModelDictMixin(object):
+
+ @classmethod
+ def from_dict(cls, d):
+ """Returns a model instance from a dictionary."""
+ return cls(**d)
+
+ def to_dict(self):
+ """Returns the model's attributes as a dictionary."""
+ names = (column.name for column in self.__table__.columns)
+ return {name: getattr(self, name) for name in names}
+
+
+_engine_facade = None
+
+
+def _get_engine_facade():
+ global _engine_facade
+
+ if not _engine_facade:
+ _engine_facade = db_session.EngineFacade.from_config(CONF)
+
+ return _engine_facade
+
+
+def cleanup():
+ global _engine_facade
+
+ _engine_facade = None
+
+
+def get_engine():
+ return _get_engine_facade().get_engine()
+
+
+def get_session(expire_on_commit=False):
+ return _get_engine_facade().get_session(expire_on_commit=expire_on_commit)
+
+
+@contextlib.contextmanager
+def transaction(expire_on_commit=False):
+ """Return a SQLAlchemy session in a scoped transaction."""
+ session = get_session(expire_on_commit=expire_on_commit)
+ with session.begin():
+ yield session
+
+
+def truncated(f):
+ """Ensure list truncation is detected in Driver list entity methods.
+
+ This is designed to wrap any SQL Driver list_{entity} method in order to
+ determine whether the resultant list has been truncated. Provided a limit
+ dict is found in the hints list, we increment the limit by one so as to
+ ask the wrapped function for one more entity than the limit. Once the
+ list has been generated, we check whether the original limit has been
+ exceeded, in which case we truncate back to that limit and set the
+ 'truncated' boolean to True in the hints limit dict.
+
+ """
+ @functools.wraps(f)
+ def wrapper(self, hints, *args, **kwargs):
+ if not hasattr(hints, 'limit'):
+ raise exception.UnexpectedError(
+ _('Cannot truncate a driver call without hints list as '
+ 'first parameter after self '))
+
+ if hints.limit is None:
+ return f(self, hints, *args, **kwargs)
+
+ # A limit is set, so ask for one more entry than we need
+ list_limit = hints.limit['limit']
+ hints.set_limit(list_limit + 1)
+ ref_list = f(self, hints, *args, **kwargs)
+
+ # If we got more than the original limit then trim back the list and
+ # mark it truncated. In both cases, make sure we set the limit back
+ # to its original value.
+ if len(ref_list) > list_limit:
+ hints.set_limit(list_limit, truncated=True)
+ return ref_list[:list_limit]
+ else:
+ hints.set_limit(list_limit)
+ return ref_list
+ return wrapper
+
+
+def _filter(model, query, hints):
+ """Applies filtering to a query.
+
+ :param model: the table model in question
+ :param query: query to apply filters to
+ :param hints: contains the list of filters yet to be satisfied.
+ Any filters satisfied here will be removed so that
+ the caller will know if any filters remain.
+
+ :returns query: query, updated with any filters satisfied
+
+ """
+ def inexact_filter(model, query, filter_, satisfied_filters, hints):
+ """Applies an inexact filter to a query.
+
+ :param model: the table model in question
+ :param query: query to apply filters to
+ :param filter_: the dict that describes this filter
+ :param satisfied_filters: a cumulative list of satisfied filters, to
+ which filter_ will be added if it is
+ satisfied.
+ :param hints: contains the list of filters yet to be satisfied.
+
+ :returns query: query updated to add any inexact filters we could
+ satisfy
+
+ """
+ column_attr = getattr(model, filter_['name'])
+
+ # TODO(henry-nash): Sqlalchemy 0.7 defaults to case insensitivity
+ # so once we find a way of changing that (maybe on a call-by-call
+ # basis), we can add support for the case sensitive versions of
+ # the filters below. For now, these case sensitive versions will
+ # be handled at the controller level.
+
+ if filter_['case_sensitive']:
+ return query
+
+ if filter_['comparator'] == 'contains':
+ query_term = column_attr.ilike('%%%s%%' % filter_['value'])
+ elif filter_['comparator'] == 'startswith':
+ query_term = column_attr.ilike('%s%%' % filter_['value'])
+ elif filter_['comparator'] == 'endswith':
+ query_term = column_attr.ilike('%%%s' % filter_['value'])
+ else:
+ # It's a filter we don't understand, so let the caller
+ # work out if they need to do something with it.
+ return query
+
+ satisfied_filters.append(filter_)
+ return query.filter(query_term)
+
+ def exact_filter(
+ model, filter_, satisfied_filters, cumulative_filter_dict, hints):
+ """Applies an exact filter to a query.
+
+ :param model: the table model in question
+ :param filter_: the dict that describes this filter
+ :param satisfied_filters: a cumulative list of satisfied filters, to
+ which filter_ will be added if it is
+ satisfied.
+ :param cumulative_filter_dict: a dict that describes the set of
+ exact filters built up so far
+ :param hints: contains the list of filters yet to be satisfied.
+
+ :returns: updated cumulative dict
+
+ """
+ key = filter_['name']
+ if isinstance(getattr(model, key).property.columns[0].type,
+ sql.types.Boolean):
+ cumulative_filter_dict[key] = (
+ utils.attr_as_boolean(filter_['value']))
+ else:
+ cumulative_filter_dict[key] = filter_['value']
+ satisfied_filters.append(filter_)
+ return cumulative_filter_dict
+
+ filter_dict = {}
+ satisfied_filters = []
+ for filter_ in hints.filters:
+ if filter_['name'] not in model.attributes:
+ continue
+ if filter_['comparator'] == 'equals':
+ filter_dict = exact_filter(
+ model, filter_, satisfied_filters, filter_dict, hints)
+ else:
+ query = inexact_filter(
+ model, query, filter_, satisfied_filters, hints)
+
+ # Apply any exact filters we built up
+ if filter_dict:
+ query = query.filter_by(**filter_dict)
+
+ # Remove satisfied filters so that the caller knows which filters remain
+ for filter_ in satisfied_filters:
+ hints.filters.remove(filter_)
+
+ return query
+
+
+def _limit(query, hints):
+ """Applies a limit to a query.
+
+ :param query: query to apply filters to
+ :param hints: contains the list of filters and limit details.
+
+ :returns: updated query
+
+ """
+ # NOTE(henry-nash): If we were to implement pagination, then we
+ # would expand this method to support pagination and limiting.
+
+ # If we satisfied all the filters, set an upper limit if supplied
+ if hints.limit:
+ query = query.limit(hints.limit['limit'])
+ return query
+
+
+def filter_limit_query(model, query, hints):
+ """Applies filtering and limit to a query.
+
+ :param model: table model
+ :param query: query to apply filters to
+ :param hints: contains the list of filters and limit details. This may
+ be None, indicating that there are no filters or limits
+ to be applied. If it's not None, then any filters
+ satisfied here will be removed so that the caller will
+ know if any filters remain.
+
+ :returns: updated query
+
+ """
+ if hints is None:
+ return query
+
+ # First try and satisfy any filters
+ query = _filter(model, query, hints)
+
+ # NOTE(henry-nash): Any unsatisfied filters will have been left in
+ # the hints list for the controller to handle. We can only try and
+ # limit here if all the filters are already satisfied since, if not,
+ # doing so might mess up the final results. If there are still
+ # unsatisfied filters, we have to leave any limiting to the controller
+ # as well.
+
+ if not hints.filters:
+ return _limit(query, hints)
+ else:
+ return query
+
+
+def handle_conflicts(conflict_type='object'):
+ """Converts select sqlalchemy exceptions into HTTP 409 Conflict."""
+ _conflict_msg = 'Conflict %(conflict_type)s: %(details)s'
+
+ def decorator(method):
+ @functools.wraps(method)
+ def wrapper(*args, **kwargs):
+ try:
+ return method(*args, **kwargs)
+ except db_exception.DBDuplicateEntry as e:
+ # LOG the exception for debug purposes, do not send the
+ # exception details out with the raised Conflict exception
+ # as it can contain raw SQL.
+ LOG.debug(_conflict_msg, {'conflict_type': conflict_type,
+ 'details': six.text_type(e)})
+ raise exception.Conflict(type=conflict_type,
+ details=_('Duplicate Entry'))
+ except db_exception.DBError as e:
+ # TODO(blk-u): inspecting inner_exception breaks encapsulation;
+ # oslo_db should provide exception we need.
+ if isinstance(e.inner_exception, IntegrityError):
+ # LOG the exception for debug purposes, do not send the
+ # exception details out with the raised Conflict exception
+ # as it can contain raw SQL.
+ LOG.debug(_conflict_msg, {'conflict_type': conflict_type,
+ 'details': six.text_type(e)})
+ # NOTE(morganfainberg): This is really a case where the SQL
+ # failed to store the data. This is not something that the
+ # user has done wrong. Example would be a ForeignKey is
+ # missing; the code that is executed before reaching the
+ # SQL writing to the DB should catch the issue.
+ raise exception.UnexpectedError(
+ _('An unexpected error occurred when trying to '
+ 'store %s') % conflict_type)
+ raise
+
+ return wrapper
+ return decorator
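
Backends built on this module typically declare their models with ModelBase and DictBase; keys that are not listed in a model's attributes are folded into the 'extra' JSON blob on the way in and merged back on the way out. A minimal sketch (the Widget model and its columns are illustrative, and it assumes the keystone package carrying the module above is importable):

    from keystone.common import sql

    class Widget(sql.ModelBase, sql.DictBase):
        __tablename__ = 'widget'
        attributes = ['id', 'name']
        id = sql.Column(sql.String(64), primary_key=True)
        name = sql.Column(sql.String(255))
        extra = sql.Column(sql.JsonBlob())

    # Keys not listed in 'attributes' are packed into the 'extra' blob ...
    ref = Widget.from_dict({'id': '1234', 'name': 'w1', 'color': 'blue'})
    assert ref.extra == {'color': 'blue'}

    # ... and merged back in when converting to a plain dict.
    assert ref.to_dict() == {'id': '1234', 'name': 'w1', 'color': 'blue'}
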
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/README b/keystone-moon/keystone/common/sql/migrate_repo/README
new file mode 100644
index 00000000..6218f8ca
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/README
@@ -0,0 +1,4 @@
+This is a database migration repository.
+
+More information at
+http://code.google.com/p/sqlalchemy-migrate/
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/__init__.py b/keystone-moon/keystone/common/sql/migrate_repo/__init__.py
new file mode 100644
index 00000000..f73dfc12
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2014 Mirantis.inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+DB_INIT_VERSION = 43
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/manage.py b/keystone-moon/keystone/common/sql/migrate_repo/manage.py
new file mode 100644
index 00000000..39fa3892
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/manage.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python
+from migrate.versioning.shell import main
+
+if __name__ == '__main__':
+ main(debug='False')
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/migrate.cfg b/keystone-moon/keystone/common/sql/migrate_repo/migrate.cfg
new file mode 100644
index 00000000..db531bb4
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/migrate.cfg
@@ -0,0 +1,25 @@
+[db_settings]
+# Used to identify which repository this database is versioned under.
+# You can use the name of your project.
+repository_id=keystone
+
+# The name of the database table used to track the schema version.
+# This name shouldn't already be used by your project.
+# If this is changed once a database is under version control, you'll need to
+# change the table name in each database too.
+version_table=migrate_version
+
+# When committing a change script, Migrate will attempt to generate the
+# sql for all supported databases; normally, if one of them fails - probably
+# because you don't have that database installed - it is ignored and the
+# commit continues, perhaps ending successfully.
+# Databases in this list MUST compile successfully during a commit, or the
+# entire commit will fail. List the databases your application will actually
+# be using to ensure your updates to that database work properly.
+# This must be a list; example: ['postgres','sqlite']
+required_dbs=[]
+
+# When creating new change scripts, Migrate will stamp the new script with
+# a version number. By default this is latest_version + 1. You can set this
+# to 'true' to tell Migrate to use the UTC timestamp instead.
+use_timestamp_numbering=False
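
The repository described by this configuration is driven with sqlalchemy-migrate. A minimal sketch of placing a database under version control and upgrading it to the latest revision (the sqlite URL and repository path below are illustrative; in a deployment, keystone-manage db_sync normally drives these migrations for you):

    from migrate.versioning import api as versioning_api

    db_url = 'sqlite:///keystone.db'             # illustrative URL
    repo = 'keystone/common/sql/migrate_repo'    # path to this repository

    # Stamp the database with the repository's starting version, then
    # apply every migration script up to the latest one.
    versioning_api.version_control(db_url, repo)
    versioning_api.upgrade(db_url, repo)
    print(versioning_api.db_version(db_url, repo))
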
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/044_icehouse.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/044_icehouse.py
new file mode 100644
index 00000000..6f326ecf
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/044_icehouse.py
@@ -0,0 +1,279 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import migrate
+from oslo_config import cfg
+from oslo_log import log
+import sqlalchemy as sql
+from sqlalchemy import orm
+
+from keystone.assignment.backends import sql as assignment_sql
+from keystone.common import sql as ks_sql
+from keystone.common.sql import migration_helpers
+
+
+LOG = log.getLogger(__name__)
+CONF = cfg.CONF
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ if migrate_engine.name == 'mysql':
+ # In Folsom we explicitly converted migrate_version to UTF8.
+ migrate_engine.execute(
+ 'ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8')
+ # Set default DB charset to UTF8.
+ migrate_engine.execute(
+ 'ALTER DATABASE %s DEFAULT CHARACTER SET utf8' %
+ migrate_engine.url.database)
+
+ credential = sql.Table(
+ 'credential', meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('user_id', sql.String(length=64), nullable=False),
+ sql.Column('project_id', sql.String(length=64)),
+ sql.Column('blob', ks_sql.JsonBlob, nullable=False),
+ sql.Column('type', sql.String(length=255), nullable=False),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ domain = sql.Table(
+ 'domain', meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('name', sql.String(length=64), nullable=False),
+ sql.Column('enabled', sql.Boolean, default=True, nullable=False),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ endpoint = sql.Table(
+ 'endpoint', meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('legacy_endpoint_id', sql.String(length=64)),
+ sql.Column('interface', sql.String(length=8), nullable=False),
+ sql.Column('region', sql.String(length=255)),
+ sql.Column('service_id', sql.String(length=64), nullable=False),
+ sql.Column('url', sql.Text, nullable=False),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ sql.Column('enabled', sql.Boolean, nullable=False, default=True,
+ server_default='1'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ group = sql.Table(
+ 'group', meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('domain_id', sql.String(length=64), nullable=False),
+ sql.Column('name', sql.String(length=64), nullable=False),
+ sql.Column('description', sql.Text),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ policy = sql.Table(
+ 'policy', meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('type', sql.String(length=255), nullable=False),
+ sql.Column('blob', ks_sql.JsonBlob, nullable=False),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ project = sql.Table(
+ 'project', meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('name', sql.String(length=64), nullable=False),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ sql.Column('description', sql.Text),
+ sql.Column('enabled', sql.Boolean),
+ sql.Column('domain_id', sql.String(length=64), nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ role = sql.Table(
+ 'role', meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('name', sql.String(length=255), nullable=False),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ service = sql.Table(
+ 'service', meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('type', sql.String(length=255)),
+ sql.Column('enabled', sql.Boolean, nullable=False, default=True,
+ server_default='1'),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ token = sql.Table(
+ 'token', meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('expires', sql.DateTime, default=None),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ sql.Column('valid', sql.Boolean, default=True, nullable=False),
+ sql.Column('trust_id', sql.String(length=64)),
+ sql.Column('user_id', sql.String(length=64)),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ trust = sql.Table(
+ 'trust', meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('trustor_user_id', sql.String(length=64), nullable=False),
+ sql.Column('trustee_user_id', sql.String(length=64), nullable=False),
+ sql.Column('project_id', sql.String(length=64)),
+ sql.Column('impersonation', sql.Boolean, nullable=False),
+ sql.Column('deleted_at', sql.DateTime),
+ sql.Column('expires_at', sql.DateTime),
+ sql.Column('remaining_uses', sql.Integer, nullable=True),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ trust_role = sql.Table(
+ 'trust_role', meta,
+ sql.Column('trust_id', sql.String(length=64), primary_key=True,
+ nullable=False),
+ sql.Column('role_id', sql.String(length=64), primary_key=True,
+ nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ user = sql.Table(
+ 'user', meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('name', sql.String(length=255), nullable=False),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ sql.Column('password', sql.String(length=128)),
+ sql.Column('enabled', sql.Boolean),
+ sql.Column('domain_id', sql.String(length=64), nullable=False),
+ sql.Column('default_project_id', sql.String(length=64)),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ user_group_membership = sql.Table(
+ 'user_group_membership', meta,
+ sql.Column('user_id', sql.String(length=64), primary_key=True),
+ sql.Column('group_id', sql.String(length=64), primary_key=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ region = sql.Table(
+ 'region',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('description', sql.String(255), nullable=False),
+ sql.Column('parent_region_id', sql.String(64), nullable=True),
+ sql.Column('extra', sql.Text()),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ assignment = sql.Table(
+ 'assignment',
+ meta,
+ sql.Column('type', sql.Enum(
+ assignment_sql.AssignmentType.USER_PROJECT,
+ assignment_sql.AssignmentType.GROUP_PROJECT,
+ assignment_sql.AssignmentType.USER_DOMAIN,
+ assignment_sql.AssignmentType.GROUP_DOMAIN,
+ name='type'),
+ nullable=False),
+ sql.Column('actor_id', sql.String(64), nullable=False),
+ sql.Column('target_id', sql.String(64), nullable=False),
+ sql.Column('role_id', sql.String(64), nullable=False),
+ sql.Column('inherited', sql.Boolean, default=False, nullable=False),
+ sql.PrimaryKeyConstraint('type', 'actor_id', 'target_id', 'role_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ # create all tables
+ tables = [credential, domain, endpoint, group,
+ policy, project, role, service,
+ token, trust, trust_role, user,
+ user_group_membership, region, assignment]
+
+ for table in tables:
+ try:
+ table.create()
+ except Exception:
+ LOG.exception('Exception while creating table: %r', table)
+ raise
+
+ # Unique Constraints
+ migrate.UniqueConstraint(user.c.domain_id,
+ user.c.name,
+ name='ixu_user_name_domain_id').create()
+ migrate.UniqueConstraint(group.c.domain_id,
+ group.c.name,
+ name='ixu_group_name_domain_id').create()
+ migrate.UniqueConstraint(role.c.name,
+ name='ixu_role_name').create()
+ migrate.UniqueConstraint(project.c.domain_id,
+ project.c.name,
+ name='ixu_project_name_domain_id').create()
+ migrate.UniqueConstraint(domain.c.name,
+ name='ixu_domain_name').create()
+
+ # Indexes
+ sql.Index('ix_token_expires', token.c.expires).create()
+ sql.Index('ix_token_expires_valid', token.c.expires,
+ token.c.valid).create()
+
+ fkeys = [
+ {'columns': [endpoint.c.service_id],
+ 'references': [service.c.id]},
+
+ {'columns': [user_group_membership.c.group_id],
+ 'references': [group.c.id],
+ 'name': 'fk_user_group_membership_group_id'},
+
+ {'columns': [user_group_membership.c.user_id],
+ 'references':[user.c.id],
+ 'name': 'fk_user_group_membership_user_id'},
+
+ {'columns': [user.c.domain_id],
+ 'references': [domain.c.id],
+ 'name': 'fk_user_domain_id'},
+
+ {'columns': [group.c.domain_id],
+ 'references': [domain.c.id],
+ 'name': 'fk_group_domain_id'},
+
+ {'columns': [project.c.domain_id],
+ 'references': [domain.c.id],
+ 'name': 'fk_project_domain_id'},
+
+ {'columns': [assignment.c.role_id],
+ 'references': [role.c.id]}
+ ]
+
+ for fkey in fkeys:
+ migrate.ForeignKeyConstraint(columns=fkey['columns'],
+ refcolumns=fkey['references'],
+ name=fkey.get('name')).create()
+
+ # Create the default domain.
+ session = orm.sessionmaker(bind=migrate_engine)()
+ domain.insert(migration_helpers.get_default_domain()).execute()
+ session.commit()
+
+
+def downgrade(migrate_engine):
+ raise NotImplementedError('Downgrade to pre-Icehouse release db schema is '
+ 'unsupported.')
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py
new file mode 100644
index 00000000..b6f40719
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py
@@ -0,0 +1,25 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for Icehouse backports. Do not use this number for new
+# Juno work. New Juno work starts after all the placeholders.
+#
+# See blueprint reserved-db-migrations-icehouse and the related discussion:
+# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
+
+
+def upgrade(migrate_engine):
+ pass
+
+
+def downgrade(migration_engine):
+ pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py
new file mode 100644
index 00000000..b6f40719
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py
@@ -0,0 +1,25 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for Icehouse backports. Do not use this number for new
+# Juno work. New Juno work starts after all the placeholders.
+#
+# See blueprint reserved-db-migrations-icehouse and the related discussion:
+# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
+
+
+def upgrade(migrate_engine):
+ pass
+
+
+def downgrade(migration_engine):
+ pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py
new file mode 100644
index 00000000..b6f40719
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py
@@ -0,0 +1,25 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for Icehouse backports. Do not use this number for new
+# Juno work. New Juno work starts after all the placeholders.
+#
+# See blueprint reserved-db-migrations-icehouse and the related discussion:
+# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
+
+
+def upgrade(migrate_engine):
+ pass
+
+
+def downgrade(migration_engine):
+ pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/048_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/048_placeholder.py
new file mode 100644
index 00000000..b6f40719
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/048_placeholder.py
@@ -0,0 +1,25 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for Icehouse backports. Do not use this number for new
+# Juno work. New Juno work starts after all the placeholders.
+#
+# See blueprint reserved-db-migrations-icehouse and the related discussion:
+# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
+
+
+def upgrade(migrate_engine):
+ pass
+
+
+def downgrade(migration_engine):
+ pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py
new file mode 100644
index 00000000..b6f40719
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py
@@ -0,0 +1,25 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for Icehouse backports. Do not use this number for new
+# Juno work. New Juno work starts after all the placeholders.
+#
+# See blueprint reserved-db-migrations-icehouse and the related discussion:
+# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
+
+
+def upgrade(migrate_engine):
+ pass
+
+
+def downgrade(migration_engine):
+ pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py
new file mode 100644
index 00000000..535a0944
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py
@@ -0,0 +1,49 @@
+# Copyright 2014 Mirantis.inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sa
+
+
+def upgrade(migrate_engine):
+
+ if migrate_engine.name == 'mysql':
+ meta = sa.MetaData(bind=migrate_engine)
+ endpoint = sa.Table('endpoint', meta, autoload=True)
+
+ # NOTE(i159): MySQL requires indexes on referencing columns and creates
+ # those indexes automatically, but they end up with different names
+ # depending on the version of MySQL used. We should make this naming
+ # consistent by re-creating the index under a single, consistent name.
+ if any(i for i in endpoint.indexes if
+ i.columns.keys() == ['service_id'] and i.name != 'service_id'):
+ # NOTE(i159): this re-creates the index under the new name, which
+ # amounts to a rename under MySQL's rules.
+ sa.Index('service_id', endpoint.c.service_id).create()
+
+ user_group_membership = sa.Table('user_group_membership',
+ meta, autoload=True)
+
+ if any(i for i in user_group_membership.indexes if
+ i.columns.keys() == ['group_id'] and i.name != 'group_id'):
+ sa.Index('group_id', user_group_membership.c.group_id).create()
+
+
+def downgrade(migrate_engine):
+ # NOTE(i159): the index exists only in MySQL schemas, and only acquired
+ # an inconsistent name when MySQL 5.5 renamed it after re-creation
+ # (during migrations). The upgrade simply fixed that inconsistency, so
+ # there is nothing to revert here.
+ pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py
new file mode 100644
index 00000000..074fbb63
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py
@@ -0,0 +1,49 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+from keystone.identity.mapping_backends import mapping
+
+
+MAPPING_TABLE = 'id_mapping'
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ mapping_table = sql.Table(
+ MAPPING_TABLE,
+ meta,
+ sql.Column('public_id', sql.String(64), primary_key=True),
+ sql.Column('domain_id', sql.String(64), nullable=False),
+ sql.Column('local_id', sql.String(64), nullable=False),
+ sql.Column('entity_type', sql.Enum(
+ mapping.EntityType.USER,
+ mapping.EntityType.GROUP,
+ name='entity_type'),
+ nullable=False),
+ sql.UniqueConstraint('domain_id', 'local_id', 'entity_type'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+ mapping_table.create(migrate_engine, checkfirst=True)
+
+
+def downgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ assignment = sql.Table(MAPPING_TABLE, meta, autoload=True)
+ assignment.drop(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/052_add_auth_url_to_region.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/052_add_auth_url_to_region.py
new file mode 100644
index 00000000..9f1fd9f0
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/052_add_auth_url_to_region.py
@@ -0,0 +1,34 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+_REGION_TABLE_NAME = 'region'
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
+ url_column = sql.Column('url', sql.String(255), nullable=True)
+ region_table.create_column(url_column)
+
+
+def downgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
+ region_table.drop_column('url')
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py
new file mode 100644
index 00000000..6dc0004f
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py
@@ -0,0 +1,156 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+"""Migrated the endpoint 'region' column to 'region_id.
+
+In addition to the rename, the new column is made a foreign key to the
+respective 'region' in the region table, ensuring that we auto-create
+any regions that are missing. Further, since the old region column
+was 255 chars, and the id column in the region table is 64 chars, the size
+of the id column in the region table is increased to match.
+
+To Upgrade:
+
+
+Region Table
+
+Increase the size of the id column in the region table
+
+Endpoint Table
+
+a. Add the endpoint region_id column, which is a foreign key to the region table
+b. For each endpoint
+ i. Ensure there is a matching region in the region table, and if not, create it
+ ii. Assign the id to the region_id column
+c. Remove the column region
+
+
+To Downgrade:
+
+Endpoint Table
+
+a. Add back in the region column
+b. For each endpoint
+ i. Copy the region_id column to the region column
+c. Remove the column region_id
+
+Region Table
+
+Decrease the size of the id column in the region table, making sure that
+we don't get clashing primary keys.
+
+"""
+
+import migrate
+import six
+import sqlalchemy as sql
+from sqlalchemy.orm import sessionmaker
+
+
+def _migrate_to_region_id(migrate_engine, region_table, endpoint_table):
+ endpoints = list(endpoint_table.select().execute())
+
+ for endpoint in endpoints:
+ if endpoint.region is None:
+ continue
+
+ region = list(region_table.select(
+ whereclause=region_table.c.id == endpoint.region).execute())
+ if len(region) == 1:
+ region_id = region[0].id
+ else:
+ region_id = endpoint.region
+ region = {'id': region_id,
+ 'description': '',
+ 'extra': '{}'}
+ session = sessionmaker(bind=migrate_engine)()
+ region_table.insert(region).execute()
+ session.commit()
+
+ new_values = {'region_id': region_id}
+ f = endpoint_table.c.id == endpoint.id
+ update = endpoint_table.update().where(f).values(new_values)
+ migrate_engine.execute(update)
+
+ migrate.ForeignKeyConstraint(
+ columns=[endpoint_table.c.region_id],
+ refcolumns=[region_table.c.id],
+ name='fk_endpoint_region_id').create()
+
+
+def _migrate_to_region(migrate_engine, region_table, endpoint_table):
+ endpoints = list(endpoint_table.select().execute())
+
+ for endpoint in endpoints:
+ new_values = {'region': endpoint.region_id}
+ f = endpoint_table.c.id == endpoint.id
+ update = endpoint_table.update().where(f).values(new_values)
+ migrate_engine.execute(update)
+
+ if 'sqlite' != migrate_engine.name:
+ migrate.ForeignKeyConstraint(
+ columns=[endpoint_table.c.region_id],
+ refcolumns=[region_table.c.id],
+ name='fk_endpoint_region_id').drop()
+ endpoint_table.c.region_id.drop()
+
+
+def _prepare_regions_for_id_truncation(migrate_engine, region_table):
+ """Ensure there are no IDs that are bigger than 64 chars.
+
+ The size of the id and parent_region_id fields was increased from 64 to
+ 255 during the upgrade. On downgrade we have to make sure that the ids
+ can fit in the restored 64-character columns. For rows with ids longer
+ than this, we have no choice but to dump them.
+
+ """
+ for region in list(region_table.select().execute()):
+ if (len(six.text_type(region.id)) > 64 or
+ len(six.text_type(region.parent_region_id)) > 64):
+ delete = region_table.delete(region_table.c.id == region.id)
+ migrate_engine.execute(delete)
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ region_table = sql.Table('region', meta, autoload=True)
+ region_table.c.id.alter(type=sql.String(length=255))
+ region_table.c.parent_region_id.alter(type=sql.String(length=255))
+ endpoint_table = sql.Table('endpoint', meta, autoload=True)
+ region_id_column = sql.Column('region_id',
+ sql.String(length=255), nullable=True)
+ region_id_column.create(endpoint_table)
+
+ _migrate_to_region_id(migrate_engine, region_table, endpoint_table)
+
+ endpoint_table.c.region.drop()
+
+
+def downgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ region_table = sql.Table('region', meta, autoload=True)
+ endpoint_table = sql.Table('endpoint', meta, autoload=True)
+ region_column = sql.Column('region', sql.String(length=255))
+ region_column.create(endpoint_table)
+
+ _migrate_to_region(migrate_engine, region_table, endpoint_table)
+ _prepare_regions_for_id_truncation(migrate_engine, region_table)
+
+ region_table.c.id.alter(type=sql.String(length=64))
+ region_table.c.parent_region_id.alter(type=sql.String(length=64))
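
In _migrate_to_region_id above, each endpoint's legacy region string either resolves to an existing row in the region table or causes a new region row, keyed by that string, to be inserted. A pure-Python sketch of that decision (the helper name and sample region names are illustrative only):

    def resolve_region_id(existing_region_ids, endpoint_region):
        """Return (region_id, new_region_row_or_None) for one endpoint."""
        if endpoint_region in existing_region_ids:
            # A region with this id already exists: just reference it.
            return endpoint_region, None
        # No matching region yet: create one keyed by the legacy string.
        return endpoint_region, {'id': endpoint_region,
                                 'description': '',
                                 'extra': '{}'}

    print(resolve_region_id({'RegionOne'}, 'RegionOne'))  # reuse existing row
    print(resolve_region_id({'RegionOne'}, 'RegionTwo'))  # insert a new row
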
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py
new file mode 100644
index 00000000..33b13b7d
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py
@@ -0,0 +1,35 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+ASSIGNMENT_TABLE = 'assignment'
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ assignment = sql.Table(ASSIGNMENT_TABLE, meta, autoload=True)
+ idx = sql.Index('ix_actor_id', assignment.c.actor_id)
+ idx.create(migrate_engine)
+
+
+def downgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ assignment = sql.Table(ASSIGNMENT_TABLE, meta, autoload=True)
+ idx = sql.Index('ix_actor_id', assignment.c.actor_id)
+ idx.drop(migrate_engine)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py
new file mode 100644
index 00000000..1cfddd3f
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py
@@ -0,0 +1,35 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Add indexes to `user_id` and `trust_id` columns for the `token` table."""
+
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ token = sql.Table('token', meta, autoload=True)
+
+ sql.Index('ix_token_user_id', token.c.user_id).create()
+ sql.Index('ix_token_trust_id', token.c.trust_id).create()
+
+
+def downgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ token = sql.Table('token', meta, autoload=True)
+
+ sql.Index('ix_token_user_id', token.c.user_id).drop()
+ sql.Index('ix_token_trust_id', token.c.trust_id).drop()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/056_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/056_placeholder.py
new file mode 100644
index 00000000..5f82254f
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/056_placeholder.py
@@ -0,0 +1,22 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for Juno backports. Do not use this number for new
+# Kilo work. New Kilo work starts after all the placeholders.
+
+
+def upgrade(migrate_engine):
+ pass
+
+
+def downgrade(migration_engine):
+ pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/057_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/057_placeholder.py
new file mode 100644
index 00000000..5f82254f
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/057_placeholder.py
@@ -0,0 +1,22 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for Juno backports. Do not use this number for new
+# Kilo work. New Kilo work starts after all the placeholders.
+
+
+def upgrade(migrate_engine):
+ pass
+
+
+def downgrade(migration_engine):
+ pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/058_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/058_placeholder.py
new file mode 100644
index 00000000..5f82254f
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/058_placeholder.py
@@ -0,0 +1,22 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for Juno backports. Do not use this number for new
+# Kilo work. New Kilo work starts after all the placeholders.
+
+
+def upgrade(migrate_engine):
+ pass
+
+
+def downgrade(migration_engine):
+ pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/059_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/059_placeholder.py
new file mode 100644
index 00000000..5f82254f
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/059_placeholder.py
@@ -0,0 +1,22 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for Juno backports. Do not use this number for new
+# Kilo work. New Kilo work starts after all the placeholders.
+
+
+def upgrade(migrate_engine):
+ pass
+
+
+def downgrade(migration_engine):
+ pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py
new file mode 100644
index 00000000..5f82254f
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py
@@ -0,0 +1,22 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for Juno backports. Do not use this number for new
+# Kilo work. New Kilo work starts after all the placeholders.
+
+
+def upgrade(migrate_engine):
+ pass
+
+
+def downgrade(migration_engine):
+ pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py
new file mode 100644
index 00000000..bb8ef9f6
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py
@@ -0,0 +1,54 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+from keystone.common.sql import migration_helpers
+
+_PROJECT_TABLE_NAME = 'project'
+_PARENT_ID_COLUMN_NAME = 'parent_id'
+
+
+def list_constraints(project_table):
+ constraints = [{'table': project_table,
+ 'fk_column': _PARENT_ID_COLUMN_NAME,
+ 'ref_column': project_table.c.id}]
+
+ return constraints
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
+ parent_id = sql.Column(_PARENT_ID_COLUMN_NAME, sql.String(64),
+ nullable=True)
+ project_table.create_column(parent_id)
+
+ if migrate_engine.name == 'sqlite':
+ return
+ migration_helpers.add_constraints(list_constraints(project_table))
+
+
+def downgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
+
+ # SQLite does not support constraints, and querying the constraints
+ # raises an exception
+ if migrate_engine.name != 'sqlite':
+ migration_helpers.remove_constraints(list_constraints(project_table))
+
+ project_table.drop_column(_PARENT_ID_COLUMN_NAME)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py
new file mode 100644
index 00000000..5a33486c
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py
@@ -0,0 +1,41 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy
+
+from keystone.common.sql import migration_helpers
+
+
+def list_constraints(migrate_engine):
+ meta = sqlalchemy.MetaData()
+ meta.bind = migrate_engine
+ assignment_table = sqlalchemy.Table('assignment', meta, autoload=True)
+ role_table = sqlalchemy.Table('role', meta, autoload=True)
+
+ constraints = [{'table': assignment_table,
+ 'fk_column': 'role_id',
+ 'ref_column': role_table.c.id}]
+ return constraints
+
+
+def upgrade(migrate_engine):
+ # SQLite does not support constraints, and querying the constraints
+ # raises an exception
+ if migrate_engine.name == 'sqlite':
+ return
+ migration_helpers.remove_constraints(list_constraints(migrate_engine))
+
+
+def downgrade(migrate_engine):
+ if migrate_engine.name == 'sqlite':
+ return
+ migration_helpers.add_constraints(list_constraints(migrate_engine))
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/063_drop_region_auth_url.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/063_drop_region_auth_url.py
new file mode 100644
index 00000000..109a8412
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/063_drop_region_auth_url.py
@@ -0,0 +1,32 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+_REGION_TABLE_NAME = 'region'
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
+ region_table.drop_column('url')
+
+
+def downgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
+ url_column = sql.Column('url', sql.String(255), nullable=True)
+ region_table.create_column(url_column)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py
new file mode 100644
index 00000000..bca00902
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py
@@ -0,0 +1,45 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy
+
+from keystone.common.sql import migration_helpers
+
+
+def list_constraints(migrate_engine):
+ meta = sqlalchemy.MetaData()
+ meta.bind = migrate_engine
+ user_table = sqlalchemy.Table('user', meta, autoload=True)
+ group_table = sqlalchemy.Table('group', meta, autoload=True)
+ domain_table = sqlalchemy.Table('domain', meta, autoload=True)
+
+ constraints = [{'table': user_table,
+ 'fk_column': 'domain_id',
+ 'ref_column': domain_table.c.id},
+ {'table': group_table,
+ 'fk_column': 'domain_id',
+ 'ref_column': domain_table.c.id}]
+ return constraints
+
+
+def upgrade(migrate_engine):
+ # SQLite does not support constraints, and querying the constraints
+ # raises an exception
+ if migrate_engine.name == 'sqlite':
+ return
+ migration_helpers.remove_constraints(list_constraints(migrate_engine))
+
+
+def downgrade(migrate_engine):
+ if migrate_engine.name == 'sqlite':
+ return
+ migration_helpers.add_constraints(list_constraints(migrate_engine))
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py
new file mode 100644
index 00000000..fd8717d2
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py
@@ -0,0 +1,55 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+from keystone.common import sql as ks_sql
+
+WHITELIST_TABLE = 'whitelisted_config'
+SENSITIVE_TABLE = 'sensitive_config'
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ whitelist_table = sql.Table(
+ WHITELIST_TABLE,
+ meta,
+ sql.Column('domain_id', sql.String(64), primary_key=True),
+ sql.Column('group', sql.String(255), primary_key=True),
+ sql.Column('option', sql.String(255), primary_key=True),
+ sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+ whitelist_table.create(migrate_engine, checkfirst=True)
+
+ sensitive_table = sql.Table(
+ SENSITIVE_TABLE,
+ meta,
+ sql.Column('domain_id', sql.String(64), primary_key=True),
+ sql.Column('group', sql.String(255), primary_key=True),
+ sql.Column('option', sql.String(255), primary_key=True),
+ sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+ sensitive_table.create(migrate_engine, checkfirst=True)
+
+
+def downgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ table = sql.Table(WHITELIST_TABLE, meta, autoload=True)
+ table.drop(migrate_engine, checkfirst=True)
+ table = sql.Table(SENSITIVE_TABLE, meta, autoload=True)
+ table.drop(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py
new file mode 100644
index 00000000..3feadc53
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py
@@ -0,0 +1,43 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ service_table = sql.Table('service', meta, autoload=True)
+ services = list(service_table.select().execute())
+
+ for service in services:
+ extra_dict = jsonutils.loads(service.extra)
+ # Skip records where the service name is already set
+ if extra_dict.get('name') is not None:
+ continue
+ # Default the name to empty string
+ extra_dict['name'] = ''
+ new_values = {
+ 'extra': jsonutils.dumps(extra_dict),
+ }
+ f = service_table.c.id == service.id
+ update = service_table.update().where(f).values(new_values)
+ migrate_engine.execute(update)
+
+
+def downgrade(migration_engine):
+ # The upgrade fixes the data inconsistency for the service name by
+ # defaulting the value to an empty string, so there is no need to
+ # revert it.
+ pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/__init__.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/__init__.py
diff --git a/keystone-moon/keystone/common/sql/migration_helpers.py b/keystone-moon/keystone/common/sql/migration_helpers.py
new file mode 100644
index 00000000..86932995
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migration_helpers.py
@@ -0,0 +1,258 @@
+# Copyright 2013 OpenStack Foundation
+# Copyright 2013 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import sys
+
+import migrate
+from migrate import exceptions
+from oslo_config import cfg
+from oslo_db.sqlalchemy import migration
+from oslo_serialization import jsonutils
+from oslo_utils import importutils
+import six
+import sqlalchemy
+
+from keystone.common import sql
+from keystone.common.sql import migrate_repo
+from keystone import contrib
+from keystone import exception
+from keystone.i18n import _
+
+
+CONF = cfg.CONF
+DEFAULT_EXTENSIONS = ['endpoint_filter',
+ 'endpoint_policy',
+ 'federation',
+ 'oauth1',
+ 'revoke',
+ ]
+
+
+def get_default_domain():
+ # Return the reference used for the default domain structure during
+ # sql migrations.
+ return {
+ 'id': CONF.identity.default_domain_id,
+ 'name': 'Default',
+ 'enabled': True,
+ 'extra': jsonutils.dumps({'description': 'Owns users and tenants '
+ '(i.e. projects) available '
+ 'on Identity API v2.'})}
+
+
+# Different RDBMSs use different schemes for naming the Foreign Key
+# Constraints. SQLAlchemy does not yet attempt to determine the name
+# for the constraint, and instead attempts to deduce it from the column.
+# This fails on MySQL.
+def get_constraints_names(table, column_name):
+ fkeys = [fk.name for fk in table.constraints
+ if (isinstance(fk, sqlalchemy.ForeignKeyConstraint) and
+ column_name in fk.columns)]
+ return fkeys
+
+
+# remove_constraints and add_constraints both accept a list of dictionaries
+# that contain:
+# {'table': a sqlalchemy table. The constraint is added to or dropped
+# from this table.
+# 'fk_column': the name of a column on the above table. The constraint
+# is added to or dropped from this column.
+# 'ref_column': a sqlalchemy column object. This is the reference column
+# for the constraint.}
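+# For example, a minimal usage sketch (the 'child'/'parent' tables are
+# assumed names, not part of this module):
+#     constraints = [{'table': child_table,
+#                     'fk_column': 'parent_id',
+#                     'ref_column': parent_table.c.id}]
+#     remove_constraints(constraints)
+#     ...alter or rename tables...
+#     add_constraints(constraints)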
+def remove_constraints(constraints):
+ for constraint_def in constraints:
+ constraint_names = get_constraints_names(constraint_def['table'],
+ constraint_def['fk_column'])
+ for constraint_name in constraint_names:
+ migrate.ForeignKeyConstraint(
+ columns=[getattr(constraint_def['table'].c,
+ constraint_def['fk_column'])],
+ refcolumns=[constraint_def['ref_column']],
+ name=constraint_name).drop()
+
+
+def add_constraints(constraints):
+ for constraint_def in constraints:
+
+ if constraint_def['table'].kwargs.get('mysql_engine') == 'MyISAM':
+ # Don't try to create constraint when using MyISAM because it's
+ # not supported.
+ continue
+
+ ref_col = constraint_def['ref_column']
+ ref_engine = ref_col.table.kwargs.get('mysql_engine')
+ if ref_engine == 'MyISAM':
+ # Don't try to create constraint when using MyISAM because it's
+ # not supported.
+ continue
+
+ migrate.ForeignKeyConstraint(
+ columns=[getattr(constraint_def['table'].c,
+ constraint_def['fk_column'])],
+ refcolumns=[constraint_def['ref_column']]).create()
+
+
+def rename_tables_with_constraints(renames, constraints, engine):
+ """Renames tables with foreign key constraints.
+
+ Tables are renamed after first removing constraints. The constraints are
+ replaced after the rename is complete.
+
+ This works on databases that don't support renaming tables that have
+ constraints on them (DB2).
+
+ `renames` is a dict, mapping {'to_table_name': from_table, ...}
+ """
+
+ if engine.name != 'sqlite':
+ # Sqlite doesn't support constraints, so nothing to remove.
+ remove_constraints(constraints)
+
+ for to_table_name in renames:
+ from_table = renames[to_table_name]
+ from_table.rename(to_table_name)
+
+ if engine.name != 'sqlite':
+ add_constraints(constraints)
+
+
+def find_migrate_repo(package=None, repo_name='migrate_repo'):
+ package = package or sql
+ path = os.path.abspath(os.path.join(
+ os.path.dirname(package.__file__), repo_name))
+ if os.path.isdir(path):
+ return path
+ raise exception.MigrationNotProvided(package.__name__, path)
+
+
+def _sync_common_repo(version):
+ abs_path = find_migrate_repo()
+ init_version = migrate_repo.DB_INIT_VERSION
+ engine = sql.get_engine()
+ migration.db_sync(engine, abs_path, version=version,
+ init_version=init_version)
+
+
+def _fix_federation_tables(engine):
+ """Fix the identity_provider, federation_protocol and mapping tables
+ to be InnoDB and Charset UTF8.
+
+ This function is to work around bug #1426334. This has occurred because
+ the original migration did not specify InnoDB and charset utf8. Due
+ to the sanity_check, a deployer can get wedged here and require manual
+ database changes to fix.
+ """
+ # NOTE(marco-fargetta): This is a workaround to "fix" those tables only
+ # if we're under MySQL
+ if engine.name == 'mysql':
+ # * Disable any check for the foreign keys because they prevent the
+ # alter table to execute
+ engine.execute("SET foreign_key_checks = 0")
+ # * Make the tables using InnoDB engine
+ engine.execute("ALTER TABLE identity_provider Engine=InnoDB")
+ engine.execute("ALTER TABLE federation_protocol Engine=InnoDB")
+ engine.execute("ALTER TABLE mapping Engine=InnoDB")
+ # * Make the tables using utf8 encoding
+ engine.execute("ALTER TABLE identity_provider "
+ "CONVERT TO CHARACTER SET utf8")
+ engine.execute("ALTER TABLE federation_protocol "
+ "CONVERT TO CHARACTER SET utf8")
+ engine.execute("ALTER TABLE mapping CONVERT TO CHARACTER SET utf8")
+ # * Revert the foreign keys check back
+ engine.execute("SET foreign_key_checks = 1")
+
+
+def _sync_extension_repo(extension, version):
+ init_version = 0
+ engine = sql.get_engine()
+
+ try:
+ package_name = '.'.join((contrib.__name__, extension))
+ package = importutils.import_module(package_name)
+ except ImportError:
+ raise ImportError(_("%s extension does not exist.")
+ % package_name)
+ try:
+ abs_path = find_migrate_repo(package)
+ try:
+ migration.db_version_control(sql.get_engine(), abs_path)
+ # Register the repo with the version control API
+ # If it already knows about the repo, it will throw
+ # an exception that we can safely ignore
+ except exceptions.DatabaseAlreadyControlledError:
+ pass
+ except exception.MigrationNotProvided as e:
+ print(e)
+ sys.exit(1)
+ try:
+ migration.db_sync(engine, abs_path, version=version,
+ init_version=init_version)
+ except ValueError:
+ # NOTE(marco-fargetta): ValueError is raised from the sanity check (
+ # verifies that tables are utf8 under mysql). The federation_protocol,
+ # identity_provider and mapping tables were not initially built with
+ # InnoDB and utf8 as part of the table arguments when the migration
+ # was initially created. Bug #1426334 is a scenario where the deployer
+ # can get wedged, unable to upgrade or downgrade.
+ # This is a workaround to "fix" those tables if we're under MySQL and
+ # the extension version is below 6, because the affected tables were
+ # created by earlier migrations and only corrected later; the fix is
+ # applied here and the migration is retried.
+ if engine.name == 'mysql' and \
+ int(six.text_type(get_db_version(extension))) < 6:
+ _fix_federation_tables(engine)
+ # The migration is applied again after the fix
+ migration.db_sync(engine, abs_path, version=version,
+ init_version=init_version)
+ else:
+ raise
+
+
+def sync_database_to_version(extension=None, version=None):
+ if not extension:
+ _sync_common_repo(version)
+ # If a specific version was requested, it applies to the common
+ # repository only, so the default extensions are synchronized only
+ # when no version is given.
+ if version is None:
+ for default_extension in DEFAULT_EXTENSIONS:
+ _sync_extension_repo(default_extension, version)
+ else:
+ _sync_extension_repo(extension, version)
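+
+# Usage sketch (the extension name and version below are illustrative):
+#     sync_database_to_version()  # common repo plus all default extensions
+#     sync_database_to_version(extension='federation', version=3)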
+
+
+def get_db_version(extension=None):
+ if not extension:
+ return migration.db_version(sql.get_engine(), find_migrate_repo(),
+ migrate_repo.DB_INIT_VERSION)
+
+ try:
+ package_name = '.'.join((contrib.__name__, extension))
+ package = importutils.import_module(package_name)
+ except ImportError:
+ raise ImportError(_("%s extension does not exist.")
+ % package_name)
+
+ return migration.db_version(
+ sql.get_engine(), find_migrate_repo(package), 0)
+
+
+def print_db_version(extension=None):
+ try:
+ db_version = get_db_version(extension=extension)
+ print(db_version)
+ except exception.MigrationNotProvided as e:
+ print(e)
+ sys.exit(1)
diff --git a/keystone-moon/keystone/common/utils.py b/keystone-moon/keystone/common/utils.py
new file mode 100644
index 00000000..a4b03ffd
--- /dev/null
+++ b/keystone-moon/keystone/common/utils.py
@@ -0,0 +1,471 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 - 2012 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import calendar
+import collections
+import grp
+import hashlib
+import os
+import pwd
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+from oslo_utils import strutils
+import passlib.hash
+import six
+from six import moves
+
+from keystone import exception
+from keystone.i18n import _, _LE, _LW
+
+
+CONF = cfg.CONF
+
+LOG = log.getLogger(__name__)
+
+
+def flatten_dict(d, parent_key=''):
+ """Flatten a nested dictionary
+
+ Converts a dictionary with nested values to a single level flat
+ dictionary, with dotted notation for each key.
+
+ """
+ items = []
+ for k, v in d.items():
+ new_key = parent_key + '.' + k if parent_key else k
+ if isinstance(v, collections.MutableMapping):
+ items.extend(flatten_dict(v, new_key).items())
+ else:
+ items.append((new_key, v))
+ return dict(items)
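+
+# For example, flatten_dict({'a': {'b': 1}, 'c': 2}) returns
+# {'a.b': 1, 'c': 2}.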
+
+
+def read_cached_file(filename, cache_info, reload_func=None):
+ """Read from a file if it has been modified.
+
+ :param cache_info: dictionary to hold opaque cache.
+ :param reload_func: optional function to be called with data when
+ file is reloaded due to a modification.
+
+ :returns: data from file.
+
+ """
+ mtime = os.path.getmtime(filename)
+ if not cache_info or mtime != cache_info.get('mtime'):
+ with open(filename) as fap:
+ cache_info['data'] = fap.read()
+ cache_info['mtime'] = mtime
+ if reload_func:
+ reload_func(cache_info['data'])
+ return cache_info['data']
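+
+# Usage sketch (the file path below is illustrative):
+#     cache = {}
+#     data = read_cached_file('/etc/keystone/policy.json', cache)
+#     # later calls return the cached data until the file's mtime changes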
+
+
+class SmarterEncoder(jsonutils.json.JSONEncoder):
+ """Help for JSON encoding dict-like objects."""
+ def default(self, obj):
+ if not isinstance(obj, dict) and hasattr(obj, 'iteritems'):
+ return dict(obj.iteritems())
+ return super(SmarterEncoder, self).default(obj)
+
+
+class PKIEncoder(SmarterEncoder):
+ """Special encoder to make token JSON a bit shorter."""
+ item_separator = ','
+ key_separator = ':'
+
+
+def verify_length_and_trunc_password(password):
+ """Verify and truncate the provided password to the max_password_length."""
+ max_length = CONF.identity.max_password_length
+ try:
+ if len(password) > max_length:
+ if CONF.strict_password_check:
+ raise exception.PasswordVerificationError(size=max_length)
+ else:
+ LOG.warning(
+ _LW('Truncating user password to '
+ '%d characters.'), max_length)
+ return password[:max_length]
+ else:
+ return password
+ except TypeError:
+ raise exception.ValidationError(attribute='string', target='password')
+
+
+def hash_access_key(access):
+ hash_ = hashlib.sha256()
+ hash_.update(access)
+ return hash_.hexdigest()
+
+
+def hash_user_password(user):
+ """Hash a user dict's password without modifying the passed-in dict."""
+ password = user.get('password')
+ if password is None:
+ return user
+
+ return dict(user, password=hash_password(password))
+
+
+def hash_password(password):
+ """Hash a password. Hard."""
+ password_utf8 = verify_length_and_trunc_password(password).encode('utf-8')
+ return passlib.hash.sha512_crypt.encrypt(
+ password_utf8, rounds=CONF.crypt_strength)
+
+
+def check_password(password, hashed):
+ """Check that a plaintext password matches hashed.
+
+ The hashed value stores the salt concatenated with the actual hash, so
+ the salt is recovered from it when the password is verified.
+
+ """
+ if password is None or hashed is None:
+ return False
+ password_utf8 = verify_length_and_trunc_password(password).encode('utf-8')
+ return passlib.hash.sha512_crypt.verify(password_utf8, hashed)
+
+
+def attr_as_boolean(val_attr):
+ """Returns the boolean value, decoded from a string.
+
+ We test explicitly for a value meaning False, which can be one of
+ several formats as specified in oslo strutils.FALSE_STRINGS.
+ All other string values (including an empty string) are treated as
+ meaning True.
+
+ """
+ return strutils.bool_from_string(val_attr, default=True)
+
+
+def get_blob_from_credential(credential):
+ try:
+ blob = jsonutils.loads(credential.blob)
+ except (ValueError, TypeError):
+ raise exception.ValidationError(
+ message=_('Invalid blob in credential'))
+ if not blob or not isinstance(blob, dict):
+ raise exception.ValidationError(attribute='blob',
+ target='credential')
+ return blob
+
+
+def convert_ec2_to_v3_credential(ec2credential):
+ blob = {'access': ec2credential.access,
+ 'secret': ec2credential.secret}
+ return {'id': hash_access_key(ec2credential.access),
+ 'user_id': ec2credential.user_id,
+ 'project_id': ec2credential.tenant_id,
+ 'blob': jsonutils.dumps(blob),
+ 'type': 'ec2',
+ 'extra': jsonutils.dumps({})}
+
+
+def convert_v3_to_ec2_credential(credential):
+ blob = get_blob_from_credential(credential)
+ return {'access': blob.get('access'),
+ 'secret': blob.get('secret'),
+ 'user_id': credential.user_id,
+ 'tenant_id': credential.project_id,
+ }
+
+
+def unixtime(dt_obj):
+ """Format datetime object as unix timestamp
+
+ :param dt_obj: datetime.datetime object
+ :returns: float
+
+ """
+ return calendar.timegm(dt_obj.utctimetuple())
+
+
+def auth_str_equal(provided, known):
+ """Constant-time string comparison.
+
+ :param provided: the first string
+ :param known: the second string
+
+ :return: True if the strings are equal.
+
+ This function takes two strings and compares them. It is intended to be
+ used when doing a comparison for authentication purposes to help guard
+ against timing attacks. When using the function for this purpose, always
+ provide the user-provided password as the first argument. The time this
+ function takes depends only on the length of this string.
+ """
+ result = 0
+ p_len = len(provided)
+ k_len = len(known)
+ for i in moves.range(p_len):
+ a = ord(provided[i]) if i < p_len else 0
+ b = ord(known[i]) if i < k_len else 0
+ result |= a ^ b
+ return (p_len == k_len) & (result == 0)
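+
+# For example, auth_str_equal('secret', 'secret') returns True and
+# auth_str_equal('secret', 'other') returns False; in both cases the running
+# time is governed by the length of the first (user-provided) argument.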
+
+
+def setup_remote_pydev_debug():
+ if CONF.pydev_debug_host and CONF.pydev_debug_port:
+ try:
+ try:
+ from pydev import pydevd
+ except ImportError:
+ import pydevd
+
+ pydevd.settrace(CONF.pydev_debug_host,
+ port=CONF.pydev_debug_port,
+ stdoutToServer=True,
+ stderrToServer=True)
+ return True
+ except Exception:
+ LOG.exception(_LE(
+ 'Error setting up the debug environment. Verify that the '
+ 'option --debug-url has the format <host>:<port> and that a '
+ 'debugger process is listening on that port.'))
+ raise
+
+
+def get_unix_user(user=None):
+ '''Get the uid and user name.
+
+ This is a convenience utility which accepts a variety of input
+ which might represent a unix user. If successful it returns the uid
+ and name. Valid input is:
+
+ string
+ A string is first considered to be a user name and a lookup is
+ attempted under that name. If no name is found then an attempt
+ is made to convert the string to an integer and perform a
+ lookup as a uid.
+
+ int
+ An integer is interpreted as a uid.
+
+ None
+ None is interpreted to mean use the current process's
+ effective user.
+
+ If the input is a valid type but no user is found a KeyError is
+ raised. If the input is not a valid type a TypeError is raised.
+
+ :param object user: string, int or None specifying the user to
+ lookup.
+
+ :return: tuple of (uid, name)
+ '''
+
+ if isinstance(user, six.string_types):
+ try:
+ user_info = pwd.getpwnam(user)
+ except KeyError:
+ try:
+ i = int(user)
+ except ValueError:
+ raise KeyError("user name '%s' not found" % user)
+ try:
+ user_info = pwd.getpwuid(i)
+ except KeyError:
+ raise KeyError("user id %d not found" % i)
+ elif isinstance(user, int):
+ try:
+ user_info = pwd.getpwuid(user)
+ except KeyError:
+ raise KeyError("user id %d not found" % user)
+ elif user is None:
+ user_info = pwd.getpwuid(os.geteuid())
+ else:
+ raise TypeError('user must be string, int or None; not %s (%r)' %
+ (user.__class__.__name__, user))
+
+ return user_info.pw_uid, user_info.pw_name
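+
+# Usage sketch (results depend on the local passwd database; the values shown
+# are illustrative):
+#     get_unix_user('root')  # -> (0, 'root')
+#     get_unix_user(0)       # -> (0, 'root')
+#     get_unix_user(None)    # -> uid and name of the current effective user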
+
+
+def get_unix_group(group=None):
+ '''Get the gid and group name.
+
+ This is a convenience utility which accepts a variety of input
+ which might represent a unix group. If successful it returns the gid
+ and name. Valid input is:
+
+ string
+ A string is first considered to be a group name and a lookup is
+ attempted under that name. If no name is found then an attempt
+ is made to convert the string to an integer and perform a
+ lookup as a gid.
+
+ int
+ An integer is interpreted as a gid.
+
+ None
+ None is interpreted to mean use the current process's
+ effective group.
+
+ If the input is a valid type but no group is found a KeyError is
+ raised. If the input is not a valid type a TypeError is raised.
+
+
+ :param object group: string, int or None specifying the group to
+ lookup.
+
+ :return: tuple of (gid, name)
+ '''
+
+ if isinstance(group, six.string_types):
+ try:
+ group_info = grp.getgrnam(group)
+ except KeyError:
+ # Was an int passed as a string?
+ # Try converting to int and lookup by id instead.
+ try:
+ i = int(group)
+ except ValueError:
+ raise KeyError("group name '%s' not found" % group)
+ try:
+ group_info = grp.getgrgid(i)
+ except KeyError:
+ raise KeyError("group id %d not found" % i)
+ elif isinstance(group, int):
+ try:
+ group_info = grp.getgrgid(group)
+ except KeyError:
+ raise KeyError("group id %d not found" % group)
+ elif group is None:
+ group_info = grp.getgrgid(os.getegid())
+ else:
+ raise TypeError('group must be string, int or None; not %s (%r)' %
+ (group.__class__.__name__, group))
+
+ return group_info.gr_gid, group_info.gr_name
+
+
+def set_permissions(path, mode=None, user=None, group=None, log=None):
+ '''Set the ownership and permissions on the pathname.
+
+ Each of the mode, user and group are optional, if None then
+ that aspect is not modified.
+
+ Owner and group may be specified either with a symbolic name
+ or numeric id.
+
+ :param string path: Pathname of directory whose existence is assured.
+ :param object mode: ownership permissions flags (int) i.e. chmod,
+ if None do not set.
+ :param object user: set user, name (string) or uid (integer),
+ if None do not set.
+ :param object group: set group, name (string) or gid (integer)
+ if None do not set.
+ :param logger log: logging.logger object, used to emit log messages,
+ if None no logging is performed.
+ '''
+
+ if user is None:
+ user_uid, user_name = None, None
+ else:
+ user_uid, user_name = get_unix_user(user)
+
+ if group is None:
+ group_gid, group_name = None, None
+ else:
+ group_gid, group_name = get_unix_group(group)
+
+ if log:
+ if mode is None:
+ mode_string = str(mode)
+ else:
+ mode_string = oct(mode)
+ log.debug("set_permissions: "
+ "path='%s' mode=%s user=%s(%s) group=%s(%s)",
+ path, mode_string,
+ user_name, user_uid, group_name, group_gid)
+
+ # Change user and group if specified
+ if user_uid is not None or group_gid is not None:
+ if user_uid is None:
+ user_uid = -1
+ if group_gid is None:
+ group_gid = -1
+ try:
+ os.chown(path, user_uid, group_gid)
+ except OSError as exc:
+ raise EnvironmentError("chown('%s', %s, %s): %s" %
+ (path,
+ user_name, group_name,
+ exc.strerror))
+
+ # Change permission flags
+ if mode is not None:
+ try:
+ os.chmod(path, mode)
+ except OSError as exc:
+ raise EnvironmentError("chmod('%s', %#o): %s" %
+ (path, mode, exc.strerror))
+
+
+def make_dirs(path, mode=None, user=None, group=None, log=None):
+ '''Assure directory exists, set ownership and permissions.
+
+ Assure the directory exists and optionally set its ownership
+ and permissions.
+
+ Each of the mode, user and group are optional, if None then
+ that aspect is not modified.
+
+ Owner and group may be specified either with a symbolic name
+ or numeric id.
+
+ :param string path: Pathname of directory whose existence is assured.
+ :param object mode: ownership permissions flags (int) i.e. chmod,
+ if None do not set.
+ :param object user: set user, name (string) or uid (integer),
+ if None do not set.
+ :param object group: set group, name (string) or gid (integer)
+ if None do not set.
+ :param logger log: logging.logger object, used to emit log messages,
+ if None no logging is performed.
+ '''
+
+ if log:
+ if mode is None:
+ mode_string = str(mode)
+ else:
+ mode_string = oct(mode)
+ log.debug("make_dirs path='%s' mode=%s user=%s group=%s",
+ path, mode_string, user, group)
+
+ if not os.path.exists(path):
+ try:
+ os.makedirs(path)
+ except OSError as exc:
+ raise EnvironmentError("makedirs('%s'): %s" % (path, exc.strerror))
+
+ set_permissions(path, mode, user, group, log)
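+
+# Usage sketch (the path, owner and group below are illustrative):
+#     make_dirs('/var/cache/example', mode=0o750,
+#               user='keystone', group='keystone', log=LOG)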
+
+
+class WhiteListedItemFilter(object):
+
+ def __init__(self, whitelist, data):
+ self._whitelist = set(whitelist or [])
+ self._data = data
+
+ def __getitem__(self, name):
+ if name not in self._whitelist:
+ raise KeyError
+ return self._data[name]
diff --git a/keystone-moon/keystone/common/validation/__init__.py b/keystone-moon/keystone/common/validation/__init__.py
new file mode 100644
index 00000000..f9c58eaf
--- /dev/null
+++ b/keystone-moon/keystone/common/validation/__init__.py
@@ -0,0 +1,62 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Request body validating middleware for OpenStack Identity resources."""
+
+import functools
+
+from keystone.common.validation import validators
+
+
+def validated(request_body_schema, resource_to_validate):
+ """Register a schema to validate a resource reference.
+
+ Registered schema will be used for validating a request body just before
+ API method execution.
+
+ :param request_body_schema: a schema to validate the resource reference
+ :param resource_to_validate: the reference to validate
+
+ """
+ schema_validator = validators.SchemaValidator(request_body_schema)
+
+ def add_validator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ if resource_to_validate in kwargs:
+ schema_validator.validate(kwargs[resource_to_validate])
+ return func(*args, **kwargs)
+ return wrapper
+ return add_validator
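+
+# Usage sketch: decorating a controller method so that its 'entity' kwarg is
+# validated before the method runs ('entity_create' and 'create_entity' are
+# assumed names):
+#     @validated(entity_create, 'entity')
+#     def create_entity(self, context, entity):
+#         ...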
+
+
+def nullable(property_schema):
+ """Clone a property schema into one that is nullable.
+
+ :param dict property_schema: schema to clone into a nullable schema
+ :returns: a new dict schema
+ """
+ # TODO(dstanek): deal with the case where type is already a list; we don't
+ # do that yet so I'm not wasting time on it
+ new_schema = property_schema.copy()
+ new_schema['type'] = [property_schema['type'], 'null']
+ return new_schema
+
+
+def add_array_type(property_schema):
+ """Convert the parameter schema to be of type list.
+
+ :param dict property_schema: schema to add array type to
+ :returns: a new dict schema
+ """
+ new_schema = property_schema.copy()
+ new_schema['type'] = [property_schema['type'], 'array']
+ return new_schema
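+
+# For example, nullable({'type': 'string'}) returns
+# {'type': ['string', 'null']} and add_array_type({'type': 'string'}) returns
+# {'type': ['string', 'array']}.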
diff --git a/keystone-moon/keystone/common/validation/parameter_types.py b/keystone-moon/keystone/common/validation/parameter_types.py
new file mode 100644
index 00000000..c5908836
--- /dev/null
+++ b/keystone-moon/keystone/common/validation/parameter_types.py
@@ -0,0 +1,57 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Common parameter types for validating a request reference."""
+
+boolean = {
+ 'type': 'boolean',
+ 'enum': [True, False]
+}
+
+# NOTE(lbragstad): Be mindful of this pattern as it might require changes
+# once this is used on user names, LDAP-based user names specifically since
+# commas aren't allowed in the following pattern. Here we are only going to
+# check the length of the name and ensure that it's a string. Right now we are
+# not going to validate on a naming pattern for issues with
+# internationalization.
+name = {
+ 'type': 'string',
+ 'minLength': 1,
+ 'maxLength': 255
+}
+
+id_string = {
+ 'type': 'string',
+ 'minLength': 1,
+ 'maxLength': 64,
+ # TODO(lbragstad): Find a way to make this configurable such that the end
+ # user chooses how much control they want over id_strings with a regex
+ 'pattern': '^[a-zA-Z0-9-]+$'
+}
+
+description = {
+ 'type': 'string'
+}
+
+url = {
+ 'type': 'string',
+ 'minLength': 0,
+ 'maxLength': 225,
+ # NOTE(edmondsw): we could do more to validate per various RFCs, but
+ # decision was made to err on the side of leniency. The following is based
+ # on rfc1738 section 2.1
+ 'pattern': '[a-zA-Z0-9+.-]+:.+'
+}
+
+email = {
+ 'type': 'string',
+ 'format': 'email'
+}
diff --git a/keystone-moon/keystone/common/validation/validators.py b/keystone-moon/keystone/common/validation/validators.py
new file mode 100644
index 00000000..a4574176
--- /dev/null
+++ b/keystone-moon/keystone/common/validation/validators.py
@@ -0,0 +1,59 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Internal implementation of request body validating middleware."""
+
+import jsonschema
+
+from keystone import exception
+from keystone.i18n import _
+
+
+class SchemaValidator(object):
+ """Resource reference validator class."""
+
+ validator = None
+ validator_org = jsonschema.Draft4Validator
+
+ def __init__(self, schema):
+ # NOTE(lbragstad): If at some point in the future we want to extend
+ # our validators to include something specific we need to check for,
+ # we can do it here. Nova's V3 API validators extend the validator to
+ # include `self._validate_minimum` and `self._validate_maximum`. This
+ # would be handy if we needed to check for something the jsonschema
+ # didn't by default. See the Nova V3 validator for details on how this
+ # is done.
+ validators = {}
+ validator_cls = jsonschema.validators.extend(self.validator_org,
+ validators)
+ fc = jsonschema.FormatChecker()
+ self.validator = validator_cls(schema, format_checker=fc)
+
+ def validate(self, *args, **kwargs):
+ try:
+ self.validator.validate(*args, **kwargs)
+ except jsonschema.ValidationError as ex:
+ # NOTE: For message consistency across OpenStack, this error
+ # message has been written in a format consistent with WSME.
+ if len(ex.path) > 0:
+ # NOTE(lbragstad): Here we could think about using iter_errors
+ # as a method of providing invalid parameters back to the
+ # user.
+ # TODO(lbragstad): If the value of a field is confidential or
+ # too long, then we should build the masking in here so that
+ # we don't expose sensitive user information in the event it
+ # fails validation.
+ detail = _("Invalid input for field '%(path)s'. The value is "
+ "'%(value)s'.") % {'path': ex.path.pop(),
+ 'value': ex.instance}
+ else:
+ detail = ex.message
+ raise exception.SchemaValidationError(detail=detail)
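+
+# Usage sketch ('entity_create' is an assumed schema dict):
+#     validator = SchemaValidator(entity_create)
+#     validator.validate(request_body)  # raises SchemaValidationError when
+#                                       # the body does not match the schema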
diff --git a/keystone-moon/keystone/common/wsgi.py b/keystone-moon/keystone/common/wsgi.py
new file mode 100644
index 00000000..6ee8150d
--- /dev/null
+++ b/keystone-moon/keystone/common/wsgi.py
@@ -0,0 +1,830 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Utility methods for working with WSGI servers."""
+
+import copy
+import itertools
+import urllib
+
+from oslo_config import cfg
+import oslo_i18n
+from oslo_log import log
+from oslo_serialization import jsonutils
+from oslo_utils import importutils
+from oslo_utils import strutils
+import routes.middleware
+import six
+import webob.dec
+import webob.exc
+
+from keystone.common import dependency
+from keystone.common import json_home
+from keystone.common import utils
+from keystone import exception
+from keystone.i18n import _
+from keystone.i18n import _LI
+from keystone.i18n import _LW
+from keystone.models import token_model
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+# Environment variable used to pass the request context
+CONTEXT_ENV = 'openstack.context'
+
+
+# Environment variable used to pass the request params
+PARAMS_ENV = 'openstack.params'
+
+
+def validate_token_bind(context, token_ref):
+ bind_mode = CONF.token.enforce_token_bind
+
+ if bind_mode == 'disabled':
+ return
+
+ if not isinstance(token_ref, token_model.KeystoneToken):
+ raise exception.UnexpectedError(_('token reference must be a '
+ 'KeystoneToken type, got: %s') %
+ type(token_ref))
+ bind = token_ref.bind
+
+ # permissive and strict modes don't require there to be a bind
+ permissive = bind_mode in ('permissive', 'strict')
+
+ # get the named mode if bind_mode is not one of the known modes
+ name = None if permissive or bind_mode == 'required' else bind_mode
+
+ if not bind:
+ if permissive:
+ # no bind provided and none required
+ return
+ else:
+ LOG.info(_LI("No bind information present in token"))
+ raise exception.Unauthorized()
+
+ if name and name not in bind:
+ LOG.info(_LI("Named bind mode %s not in bind information"), name)
+ raise exception.Unauthorized()
+
+ for bind_type, identifier in six.iteritems(bind):
+ if bind_type == 'kerberos':
+ if not (context['environment'].get('AUTH_TYPE', '').lower()
+ == 'negotiate'):
+ LOG.info(_LI("Kerberos credentials required and not present"))
+ raise exception.Unauthorized()
+
+ if not context['environment'].get('REMOTE_USER') == identifier:
+ LOG.info(_LI("Kerberos credentials do not match "
+ "those in bind"))
+ raise exception.Unauthorized()
+
+ LOG.info(_LI("Kerberos bind authentication successful"))
+
+ elif bind_mode == 'permissive':
+ LOG.debug(("Ignoring unknown bind for permissive mode: "
+ "{%(bind_type)s: %(identifier)s}"),
+ {'bind_type': bind_type, 'identifier': identifier})
+ else:
+ LOG.info(_LI("Couldn't verify unknown bind: "
+ "{%(bind_type)s: %(identifier)s}"),
+ {'bind_type': bind_type, 'identifier': identifier})
+ raise exception.Unauthorized()
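+
+# Illustrative example: with enforce_token_bind set to 'kerberos', a token
+# whose bind data is of the form {'kerberos': <remote user>} is only accepted
+# when the request was authenticated via negotiate and REMOTE_USER matches
+# that identifier.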
+
+
+def best_match_language(req):
+ """Determines the best available locale from the Accept-Language
+ HTTP header passed in the request.
+ """
+
+ if not req.accept_language:
+ return None
+ return req.accept_language.best_match(
+ oslo_i18n.get_available_languages('keystone'))
+
+
+class BaseApplication(object):
+ """Base WSGI application wrapper. Subclasses need to implement __call__."""
+
+ @classmethod
+ def factory(cls, global_config, **local_config):
+ """Used for paste app factories in paste.deploy config files.
+
+ Any local configuration (that is, values under the [app:APPNAME]
+ section of the paste config) will be passed into the `__init__` method
+ as kwargs.
+
+ A hypothetical configuration would look like:
+
+ [app:wadl]
+ latest_version = 1.3
+ paste.app_factory = keystone.fancy_api:Wadl.factory
+
+ which would result in a call to the `Wadl` class as
+
+ import keystone.fancy_api
+ keystone.fancy_api.Wadl(latest_version='1.3')
+
+ You could of course re-implement the `factory` method in subclasses,
+ but using the kwarg passing it shouldn't be necessary.
+
+ """
+ return cls(**local_config)
+
+ def __call__(self, environ, start_response):
+ r"""Subclasses will probably want to implement __call__ like this:
+
+ @webob.dec.wsgify()
+ def __call__(self, req):
+ # Any of the following objects work as responses:
+
+ # Option 1: simple string
+ res = 'message\n'
+
+ # Option 2: a nicely formatted HTTP exception page
+ res = exc.HTTPForbidden(explanation='Nice try')
+
+ # Option 3: a webob Response object (in case you need to play with
+ # headers, or you want to be treated like an iterable, or or or)
+ res = Response();
+ res.app_iter = open('somefile')
+
+ # Option 4: any wsgi app to be run next
+ res = self.application
+
+ # Option 5: you can get a Response object for a wsgi app, too, to
+ # play with headers etc
+ res = req.get_response(self.application)
+
+ # You can then just return your response...
+ return res
+ # ... or set req.response and return None.
+ req.response = res
+
+ See the end of http://pythonpaste.org/webob/modules/dec.html
+ for more info.
+
+ """
+ raise NotImplementedError('You must implement __call__')
+
+
+@dependency.requires('assignment_api', 'policy_api', 'token_provider_api')
+class Application(BaseApplication):
+ @webob.dec.wsgify()
+ def __call__(self, req):
+ arg_dict = req.environ['wsgiorg.routing_args'][1]
+ action = arg_dict.pop('action')
+ del arg_dict['controller']
+
+ # allow middleware up the stack to provide context, params and headers.
+ context = req.environ.get(CONTEXT_ENV, {})
+ context['query_string'] = dict(six.iteritems(req.params))
+ context['headers'] = dict(six.iteritems(req.headers))
+ context['path'] = req.environ['PATH_INFO']
+ scheme = (None if not CONF.secure_proxy_ssl_header
+ else req.environ.get(CONF.secure_proxy_ssl_header))
+ if scheme:
+ # NOTE(andrey-mp): "wsgi.url_scheme" contains the protocol used
+ # before the proxy removed it ('https' usually). So if
+ # the webob.Request instance is modified in order to use this
+ # scheme instead of the one defined by API, the call to
+ # webob.Request.relative_url() will return a URL with the correct
+ # scheme.
+ req.environ['wsgi.url_scheme'] = scheme
+ context['host_url'] = req.host_url
+ params = req.environ.get(PARAMS_ENV, {})
+ # authentication and authorization attributes are set as environment
+ # values by the container and processed by the pipeline. The complete
+ # set is not yet known.
+ context['environment'] = req.environ
+ context['accept_header'] = req.accept
+ req.environ = None
+
+ params.update(arg_dict)
+
+ context.setdefault('is_admin', False)
+
+ # TODO(termie): do some basic normalization on methods
+ method = getattr(self, action)
+
+ # NOTE(morganfainberg): use the request method to normalize the
+ # response code between GET and HEAD requests. The HTTP status should
+ # be the same.
+ req_method = req.environ['REQUEST_METHOD'].upper()
+ LOG.info('%(req_method)s %(path)s?%(params)s', {
+ 'req_method': req_method,
+ 'path': context['path'],
+ 'params': urllib.urlencode(req.params)})
+
+ params = self._normalize_dict(params)
+
+ try:
+ result = method(context, **params)
+ except exception.Unauthorized as e:
+ LOG.warning(
+ _LW("Authorization failed. %(exception)s from "
+ "%(remote_addr)s"),
+ {'exception': e, 'remote_addr': req.environ['REMOTE_ADDR']})
+ return render_exception(e, context=context,
+ user_locale=best_match_language(req))
+ except exception.Error as e:
+ LOG.warning(six.text_type(e))
+ return render_exception(e, context=context,
+ user_locale=best_match_language(req))
+ except TypeError as e:
+ LOG.exception(six.text_type(e))
+ return render_exception(exception.ValidationError(e),
+ context=context,
+ user_locale=best_match_language(req))
+ except Exception as e:
+ LOG.exception(six.text_type(e))
+ return render_exception(exception.UnexpectedError(exception=e),
+ context=context,
+ user_locale=best_match_language(req))
+
+ if result is None:
+ return render_response(status=(204, 'No Content'))
+ elif isinstance(result, six.string_types):
+ return result
+ elif isinstance(result, webob.Response):
+ return result
+ elif isinstance(result, webob.exc.WSGIHTTPException):
+ return result
+
+ response_code = self._get_response_code(req)
+ return render_response(body=result, status=response_code,
+ method=req_method)
+
+ def _get_response_code(self, req):
+ req_method = req.environ['REQUEST_METHOD']
+ controller = importutils.import_class('keystone.common.controller')
+ code = None
+ if isinstance(self, controller.V3Controller) and req_method == 'POST':
+ code = (201, 'Created')
+ return code
+
+ def _normalize_arg(self, arg):
+ return arg.replace(':', '_').replace('-', '_')
+
+ def _normalize_dict(self, d):
+ return {self._normalize_arg(k): v for (k, v) in six.iteritems(d)}
+
+ def assert_admin(self, context):
+ if not context['is_admin']:
+ try:
+ user_token_ref = token_model.KeystoneToken(
+ token_id=context['token_id'],
+ token_data=self.token_provider_api.validate_token(
+ context['token_id']))
+ except exception.TokenNotFound as e:
+ raise exception.Unauthorized(e)
+
+ validate_token_bind(context, user_token_ref)
+ creds = copy.deepcopy(user_token_ref.metadata)
+
+ try:
+ creds['user_id'] = user_token_ref.user_id
+ except exception.UnexpectedError:
+ LOG.debug('Invalid user')
+ raise exception.Unauthorized()
+
+ if user_token_ref.project_scoped:
+ creds['tenant_id'] = user_token_ref.project_id
+ else:
+ LOG.debug('Invalid tenant')
+ raise exception.Unauthorized()
+
+ creds['roles'] = user_token_ref.role_names
+ # Accept either is_admin or the admin role
+ self.policy_api.enforce(creds, 'admin_required', {})
+
+ def _attribute_is_empty(self, ref, attribute):
+ """Returns true if the attribute in the given ref (which is a
+ dict) is empty or None.
+ """
+ return ref.get(attribute) is None or ref.get(attribute) == ''
+
+ def _require_attribute(self, ref, attribute):
+ """Ensures the reference contains the specified attribute.
+
+ Raise a ValidationError if the given attribute is not present
+ """
+ if self._attribute_is_empty(ref, attribute):
+ msg = _('%s field is required and cannot be empty') % attribute
+ raise exception.ValidationError(message=msg)
+
+ def _require_attributes(self, ref, attrs):
+ """Ensures the reference contains the specified attributes.
+
+ Raise a ValidationError if any of the given attributes is not present
+ """
+ missing_attrs = [attribute for attribute in attrs
+ if self._attribute_is_empty(ref, attribute)]
+
+ if missing_attrs:
+ msg = _('%s field(s) cannot be empty') % ', '.join(missing_attrs)
+ raise exception.ValidationError(message=msg)
+
+ def _get_trust_id_for_request(self, context):
+ """Get the trust_id for a call.
+
+ Retrieve the trust_id from the token
+ Returns None if token is not trust scoped
+ """
+ if ('token_id' not in context or
+ context.get('token_id') == CONF.admin_token):
+ LOG.debug(('will not look up trust as the request auth token is '
+ 'either absent or it is the system admin token'))
+ return None
+
+ try:
+ token_data = self.token_provider_api.validate_token(
+ context['token_id'])
+ except exception.TokenNotFound:
+ LOG.warning(_LW('Invalid token in _get_trust_id_for_request'))
+ raise exception.Unauthorized()
+
+ token_ref = token_model.KeystoneToken(token_id=context['token_id'],
+ token_data=token_data)
+ return token_ref.trust_id
+
+ @classmethod
+ def base_url(cls, context, endpoint_type):
+ url = CONF['%s_endpoint' % endpoint_type]
+
+ if url:
+ substitutions = dict(
+ itertools.chain(six.iteritems(CONF),
+ six.iteritems(CONF.eventlet_server)))
+
+ url = url % substitutions
+ else:
+ # NOTE(jamielennox): if url is not set via the config file we
+ # should set it relative to the url that the user used to get here
+ # so as not to mess with version discovery. This is not perfect.
+ # host_url omits the path prefix, but there isn't another good
+ # solution that will work for all urls.
+ url = context['host_url']
+
+ return url.rstrip('/')
+
+
+class Middleware(Application):
+ """Base WSGI middleware.
+
+ These classes require an application to be
+ initialized that will be called next. By default the middleware will
+ simply call its wrapped app, or you can override __call__ to customize its
+ behavior.
+
+ """
+
+ @classmethod
+ def factory(cls, global_config, **local_config):
+ """Used for paste app factories in paste.deploy config files.
+
+ Any local configuration (that is, values under the [filter:APPNAME]
+ section of the paste config) will be passed into the `__init__` method
+ as kwargs.
+
+ A hypothetical configuration would look like:
+
+ [filter:analytics]
+ redis_host = 127.0.0.1
+ paste.filter_factory = keystone.analytics:Analytics.factory
+
+ which would result in a call to the `Analytics` class as
+
+ import keystone.analytics
+ keystone.analytics.Analytics(app, redis_host='127.0.0.1')
+
+ You could of course re-implement the `factory` method in subclasses,
+ but using the kwarg passing it shouldn't be necessary.
+
+ """
+ def _factory(app):
+ conf = global_config.copy()
+ conf.update(local_config)
+ return cls(app, **local_config)
+ return _factory
+
+ def __init__(self, application):
+ super(Middleware, self).__init__()
+ self.application = application
+
+ def process_request(self, request):
+ """Called on each request.
+
+ If this returns None, the next application down the stack will be
+ executed. If it returns a response then that response will be returned
+ and execution will stop here.
+
+ """
+ return None
+
+ def process_response(self, request, response):
+ """Do whatever you'd like to the response, based on the request."""
+ return response
+
+ @webob.dec.wsgify()
+ def __call__(self, request):
+ try:
+ response = self.process_request(request)
+ if response:
+ return response
+ response = request.get_response(self.application)
+ return self.process_response(request, response)
+ except exception.Error as e:
+ LOG.warning(six.text_type(e))
+ return render_exception(e, request=request,
+ user_locale=best_match_language(request))
+ except TypeError as e:
+ LOG.exception(six.text_type(e))
+ return render_exception(exception.ValidationError(e),
+ request=request,
+ user_locale=best_match_language(request))
+ except Exception as e:
+ LOG.exception(six.text_type(e))
+ return render_exception(exception.UnexpectedError(exception=e),
+ request=request,
+ user_locale=best_match_language(request))
+
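+# Illustrative only (AddHeaderMiddleware does not exist in Keystone): a
+# middleware that adds a response header would subclass Middleware and
+# override the process_response() hook rather than __call__:
+#
+#     class AddHeaderMiddleware(Middleware):
+#         def process_response(self, request, response):
+#             response.headers['X-Handled-By'] = 'keystone'
+#             return response
+#
+# Wired into a paste pipeline via AddHeaderMiddleware.factory, it runs after
+# the wrapped application has produced its response.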
+
+class Debug(Middleware):
+ """Helper class for debugging a WSGI application.
+
+ Can be inserted into any WSGI application chain to get information
+ about the request and response.
+
+ """
+
+ @webob.dec.wsgify()
+ def __call__(self, req):
+ if not hasattr(LOG, 'isEnabledFor') or LOG.isEnabledFor(LOG.debug):
+ LOG.debug('%s %s %s', ('*' * 20), 'REQUEST ENVIRON', ('*' * 20))
+ for key, value in req.environ.items():
+ LOG.debug('%s = %s', key,
+ strutils.mask_password(value))
+ LOG.debug('')
+ LOG.debug('%s %s %s', ('*' * 20), 'REQUEST BODY', ('*' * 20))
+ for line in req.body_file:
+ LOG.debug('%s', strutils.mask_password(line))
+ LOG.debug('')
+
+ resp = req.get_response(self.application)
+ if not hasattr(LOG, 'isEnabledFor') or LOG.isEnabledFor(LOG.debug):
+ LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE HEADERS', ('*' * 20))
+ for (key, value) in six.iteritems(resp.headers):
+ LOG.debug('%s = %s', key, value)
+ LOG.debug('')
+
+ resp.app_iter = self.print_generator(resp.app_iter)
+
+ return resp
+
+ @staticmethod
+ def print_generator(app_iter):
+ """Iterator that prints the contents of a wrapper string."""
+ LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE BODY', ('*' * 20))
+ for part in app_iter:
+ LOG.debug(part)
+ yield part
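+
+    # Deployment sketch (filter and pipeline names are illustrative): this
+    # middleware can be inserted into a keystone-paste.ini pipeline to dump
+    # request environs, bodies and response headers to the debug log:
+    #
+    #     [filter:debug]
+    #     paste.filter_factory = keystone.common.wsgi:Debug.factory
+    #
+    #     [pipeline:public_api]
+    #     pipeline = debug ... public_service
+    #
+    # Request values are passed through strutils.mask_password before being
+    # logged, but enabling this in production is still not recommended.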
+
+
+class Router(object):
+ """WSGI middleware that maps incoming requests to WSGI apps."""
+
+ def __init__(self, mapper):
+ """Create a router for the given routes.Mapper.
+
+ Each route in `mapper` must specify a 'controller', which is a
+ WSGI app to call. You'll probably want to specify an 'action' as
+ well and have your controller be an object that can route
+ the request to the action-specific method.
+
+ Examples:
+ mapper = routes.Mapper()
+ sc = ServerController()
+
+ # Explicit mapping of one route to a controller+action
+ mapper.connect(None, '/svrlist', controller=sc, action='list')
+
+ # Actions are all implicitly defined
+ mapper.resource('server', 'servers', controller=sc)
+
+ # Pointing to an arbitrary WSGI app. You can specify the
+ # {path_info:.*} parameter so the target app can be handed just that
+ # section of the URL.
+ mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
+
+ """
+ self.map = mapper
+ self._router = routes.middleware.RoutesMiddleware(self._dispatch,
+ self.map)
+
+ @webob.dec.wsgify()
+ def __call__(self, req):
+ """Route the incoming request to a controller based on self.map.
+
+ If no match, return a 404.
+
+ """
+ return self._router
+
+ @staticmethod
+ @webob.dec.wsgify()
+ def _dispatch(req):
+ """Dispatch the request to the appropriate controller.
+
+ Called by self._router after matching the incoming request to a route
+ and putting the information into req.environ. Either returns 404
+ or the routed WSGI app's response.
+
+ """
+ match = req.environ['wsgiorg.routing_args'][1]
+ if not match:
+ msg = _('The resource could not be found.')
+ return render_exception(exception.NotFound(msg),
+ request=req,
+ user_locale=best_match_language(req))
+ app = match['controller']
+ return app
+
+
+class ComposingRouter(Router):
+ def __init__(self, mapper=None, routers=None):
+ if mapper is None:
+ mapper = routes.Mapper()
+ if routers is None:
+ routers = []
+ for router in routers:
+ router.add_routes(mapper)
+ super(ComposingRouter, self).__init__(mapper)
+
+
+class ComposableRouter(Router):
+ """Router that supports use by ComposingRouter."""
+
+ def __init__(self, mapper=None):
+ if mapper is None:
+ mapper = routes.Mapper()
+ self.add_routes(mapper)
+ super(ComposableRouter, self).__init__(mapper)
+
+ def add_routes(self, mapper):
+ """Add routes to given mapper."""
+ pass
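+
+    # Illustrative subclass (StatusController and the '/status' route are
+    # hypothetical): a composable router only needs to implement
+    # add_routes(); ComposingRouter then collects routes from every router
+    # it is given.
+    #
+    #     class StatusRouter(ComposableRouter):
+    #         def add_routes(self, mapper):
+    #             controller = StatusController()
+    #             mapper.connect('/status', controller=controller,
+    #                            action='get_status',
+    #                            conditions=dict(method=['GET']))
+    #
+    #     router = ComposingRouter(routers=[StatusRouter()])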
+
+
+class ExtensionRouter(Router):
+ """A router that allows extensions to supplement or overwrite routes.
+
+ Expects to be subclassed.
+ """
+ def __init__(self, application, mapper=None):
+ if mapper is None:
+ mapper = routes.Mapper()
+ self.application = application
+ self.add_routes(mapper)
+ mapper.connect('{path_info:.*}', controller=self.application)
+ super(ExtensionRouter, self).__init__(mapper)
+
+ def add_routes(self, mapper):
+ pass
+
+ @classmethod
+ def factory(cls, global_config, **local_config):
+ """Used for paste app factories in paste.deploy config files.
+
+ Any local configuration (that is, values under the [filter:APPNAME]
+ section of the paste config) will be passed into the `__init__` method
+ as kwargs.
+
+ A hypothetical configuration would look like:
+
+ [filter:analytics]
+ redis_host = 127.0.0.1
+ paste.filter_factory = keystone.analytics:Analytics.factory
+
+ which would result in a call to the `Analytics` class as
+
+ import keystone.analytics
+ keystone.analytics.Analytics(app, redis_host='127.0.0.1')
+
+ You could of course re-implement the `factory` method in subclasses,
+        but with the kwarg passing it shouldn't be necessary.
+
+ """
+ def _factory(app):
+ conf = global_config.copy()
+ conf.update(local_config)
+ return cls(app, **local_config)
+ return _factory
+
+
+class RoutersBase(object):
+ """Base class for Routers."""
+
+ def __init__(self):
+ self.v3_resources = []
+
+ def append_v3_routers(self, mapper, routers):
+ """Append v3 routers.
+
+        Subclasses should override this method to map their routes.
+
+ Use self._add_resource() to map routes for a resource.
+ """
+
+ def _add_resource(self, mapper, controller, path, rel,
+ get_action=None, head_action=None, get_head_action=None,
+ put_action=None, post_action=None, patch_action=None,
+ delete_action=None, get_post_action=None,
+ path_vars=None, status=None):
+ if get_head_action:
+ getattr(controller, get_head_action) # ensure the attribute exists
+ mapper.connect(path, controller=controller, action=get_head_action,
+ conditions=dict(method=['GET', 'HEAD']))
+ if get_action:
+ getattr(controller, get_action) # ensure the attribute exists
+ mapper.connect(path, controller=controller, action=get_action,
+ conditions=dict(method=['GET']))
+ if head_action:
+ getattr(controller, head_action) # ensure the attribute exists
+ mapper.connect(path, controller=controller, action=head_action,
+ conditions=dict(method=['HEAD']))
+ if put_action:
+ getattr(controller, put_action) # ensure the attribute exists
+ mapper.connect(path, controller=controller, action=put_action,
+ conditions=dict(method=['PUT']))
+ if post_action:
+ getattr(controller, post_action) # ensure the attribute exists
+ mapper.connect(path, controller=controller, action=post_action,
+ conditions=dict(method=['POST']))
+ if patch_action:
+ getattr(controller, patch_action) # ensure the attribute exists
+ mapper.connect(path, controller=controller, action=patch_action,
+ conditions=dict(method=['PATCH']))
+ if delete_action:
+ getattr(controller, delete_action) # ensure the attribute exists
+ mapper.connect(path, controller=controller, action=delete_action,
+ conditions=dict(method=['DELETE']))
+ if get_post_action:
+ getattr(controller, get_post_action) # ensure the attribute exists
+ mapper.connect(path, controller=controller, action=get_post_action,
+ conditions=dict(method=['GET', 'POST']))
+
+ resource_data = dict()
+
+ if path_vars:
+ resource_data['href-template'] = path
+ resource_data['href-vars'] = path_vars
+ else:
+ resource_data['href'] = path
+
+ if status:
+ if not json_home.Status.is_supported(status):
+ raise exception.Error(message=_(
+ 'Unexpected status requested for JSON Home response, %s') %
+ status)
+ resource_data.setdefault('hints', {})
+ resource_data['hints']['status'] = status
+
+ self.v3_resources.append((rel, resource_data))
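+
+    # Illustrative override (WidgetV3 and the 'widget' relation are
+    # hypothetical): subclasses map their resources in append_v3_routers()
+    # via _add_resource(), which wires the mapper and records the JSON Home
+    # relationship data in self.v3_resources.
+    #
+    #     def append_v3_routers(self, mapper, routers):
+    #         widget_controller = WidgetV3()
+    #         self._add_resource(
+    #             mapper, widget_controller,
+    #             path='/widgets/{widget_id}',
+    #             get_action='get_widget',
+    #             delete_action='delete_widget',
+    #             rel=json_home.build_v3_resource_relation('widget'),
+    #             path_vars={
+    #                 'widget_id':
+    #                     json_home.build_v3_parameter_relation('widget_id'),
+    #             })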
+
+
+class V3ExtensionRouter(ExtensionRouter, RoutersBase):
+ """Base class for V3 extension router."""
+
+ def __init__(self, application, mapper=None):
+ self.v3_resources = list()
+ super(V3ExtensionRouter, self).__init__(application, mapper)
+
+ def _update_version_response(self, response_data):
+ response_data['resources'].update(self.v3_resources)
+
+ @webob.dec.wsgify()
+ def __call__(self, request):
+ if request.path_info != '/':
+ # Not a request for version info so forward to super.
+ return super(V3ExtensionRouter, self).__call__(request)
+
+ response = request.get_response(self.application)
+
+ if response.status_code != 200:
+ # The request failed, so don't update the response.
+ return response
+
+ if response.headers['Content-Type'] != 'application/json-home':
+ # Not a request for JSON Home document, so don't update the
+ # response.
+ return response
+
+ response_data = jsonutils.loads(response.body)
+ self._update_version_response(response_data)
+ response.body = jsonutils.dumps(response_data,
+ cls=utils.SmarterEncoder)
+ return response
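+
+    # Sketch of the effect (relation URL and path are illustrative): each
+    # (rel, resource_data) pair collected via _add_resource() is merged into
+    # the 'resources' mapping of the JSON Home document, e.g.
+    #
+    #     {"resources": {
+    #         "http://docs.openstack.org/api/openstack-identity/3/rel/widget":
+    #             {"href-template": "/widgets/{widget_id}",
+    #              "href-vars": {"widget_id": "..."}}}}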
+
+
+def render_response(body=None, status=None, headers=None, method=None):
+ """Forms a WSGI response."""
+ if headers is None:
+ headers = []
+ else:
+ headers = list(headers)
+ headers.append(('Vary', 'X-Auth-Token'))
+
+ if body is None:
+ body = ''
+ status = status or (204, 'No Content')
+ else:
+ content_types = [v for h, v in headers if h == 'Content-Type']
+ if content_types:
+ content_type = content_types[0]
+ else:
+ content_type = None
+
+ JSON_ENCODE_CONTENT_TYPES = ('application/json',
+ 'application/json-home',)
+ if content_type is None or content_type in JSON_ENCODE_CONTENT_TYPES:
+ body = jsonutils.dumps(body, cls=utils.SmarterEncoder)
+ if content_type is None:
+ headers.append(('Content-Type', 'application/json'))
+ status = status or (200, 'OK')
+
+ resp = webob.Response(body=body,
+ status='%s %s' % status,
+ headerlist=headers)
+
+ if method == 'HEAD':
+ # NOTE(morganfainberg): HEAD requests should return the same status
+ # as a GET request and same headers (including content-type and
+ # content-length). The webob.Response object automatically changes
+ # content-length (and other headers) if the body is set to b''. Capture
+ # all headers and reset them on the response object after clearing the
+ # body. The body can only be set to a binary-type (not TextType or
+ # NoneType), so b'' is used here and should be compatible with
+ # both py2x and py3x.
+ stored_headers = resp.headers.copy()
+ resp.body = b''
+ for header, value in six.iteritems(stored_headers):
+ resp.headers[header] = value
+
+ return resp
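+
+# Usage sketch (the 'widget' body and status are illustrative): controllers
+# typically return the result of render_response() directly and let it
+# JSON-encode the body and pick sensible defaults:
+#
+#     return render_response(body={'widget': {'id': widget_id}},
+#                            status=(201, 'Created'))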
+
+
+def render_exception(error, context=None, request=None, user_locale=None):
+ """Forms a WSGI response based on the current error."""
+
+ error_message = error.args[0]
+ message = oslo_i18n.translate(error_message, desired_locale=user_locale)
+ if message is error_message:
+        # translate() didn't do anything because it wasn't a Message;
+        # convert it to a string.
+ message = six.text_type(message)
+
+ body = {'error': {
+ 'code': error.code,
+ 'title': error.title,
+ 'message': message,
+ }}
+ headers = []
+ if isinstance(error, exception.AuthPluginException):
+ body['error']['identity'] = error.authentication
+ elif isinstance(error, exception.Unauthorized):
+ url = CONF.public_endpoint
+ if not url:
+ if request:
+ context = {'host_url': request.host_url}
+ if context:
+ url = Application.base_url(context, 'public')
+ else:
+ url = 'http://localhost:%d' % CONF.eventlet_server.public_port
+ else:
+ substitutions = dict(
+ itertools.chain(six.iteritems(CONF),
+ six.iteritems(CONF.eventlet_server)))
+ url = url % substitutions
+
+ headers.append(('WWW-Authenticate', 'Keystone uri="%s"' % url))
+ return render_response(status=(error.code, error.title),
+ body=body,
+ headers=headers)