Diffstat (limited to 'keystone-moon/keystone/common')
-rw-r--r--  keystone-moon/keystone/common/__init__.py  0
-rw-r--r--  keystone-moon/keystone/common/authorization.py  111
-rw-r--r--  keystone-moon/keystone/common/base64utils.py  401
-rw-r--r--  keystone-moon/keystone/common/cache/__init__.py  15
-rw-r--r--  keystone-moon/keystone/common/cache/_context_cache.py  129
-rw-r--r--  keystone-moon/keystone/common/cache/_memcache_pool.py  244
-rw-r--r--  keystone-moon/keystone/common/cache/backends/__init__.py  0
-rw-r--r--  keystone-moon/keystone/common/cache/backends/memcache_pool.py  28
-rw-r--r--  keystone-moon/keystone/common/cache/backends/mongo.py  25
-rw-r--r--  keystone-moon/keystone/common/cache/backends/noop.py  56
-rw-r--r--  keystone-moon/keystone/common/cache/core.py  124
-rw-r--r--  keystone-moon/keystone/common/clean.py  87
-rw-r--r--  keystone-moon/keystone/common/config.py  1259
-rw-r--r--  keystone-moon/keystone/common/controller.py  835
-rw-r--r--  keystone-moon/keystone/common/dependency.py  230
-rw-r--r--  keystone-moon/keystone/common/driver_hints.py  115
-rw-r--r--  keystone-moon/keystone/common/environment/__init__.py  102
-rw-r--r--  keystone-moon/keystone/common/environment/eventlet_server.py  212
-rw-r--r--  keystone-moon/keystone/common/extension.py  44
-rw-r--r--  keystone-moon/keystone/common/json_home.py  86
-rw-r--r--  keystone-moon/keystone/common/kvs/__init__.py  32
-rw-r--r--  keystone-moon/keystone/common/kvs/backends/__init__.py  0
-rw-r--r--  keystone-moon/keystone/common/kvs/backends/inmemdb.py  68
-rw-r--r--  keystone-moon/keystone/common/kvs/backends/memcached.py  195
-rw-r--r--  keystone-moon/keystone/common/kvs/core.py  450
-rw-r--r--  keystone-moon/keystone/common/kvs/legacy.py  61
-rw-r--r--  keystone-moon/keystone/common/ldap/__init__.py  15
-rw-r--r--  keystone-moon/keystone/common/ldap/core.py  1955
-rw-r--r--  keystone-moon/keystone/common/manager.py  220
-rw-r--r--  keystone-moon/keystone/common/models.py  196
-rw-r--r--  keystone-moon/keystone/common/openssl.py  337
-rwxr-xr-x  keystone-moon/keystone/common/pemutils.py  509
-rw-r--r--  keystone-moon/keystone/common/router.py  82
-rw-r--r--  keystone-moon/keystone/common/sql/__init__.py  15
-rw-r--r--  keystone-moon/keystone/common/sql/core.py  434
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/README  4
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/__init__.py  0
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/manage.py  5
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/migrate.cfg  25
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/067_kilo.py  317
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/068_placeholder.py  18
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/069_placeholder.py  18
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/070_placeholder.py  18
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/071_placeholder.py  18
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/072_placeholder.py  18
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py  113
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/074_add_is_domain_project.py  27
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/075_confirm_config_registration.py  29
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/076_placeholder.py  18
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/077_placeholder.py  18
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/078_placeholder.py  18
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/079_placeholder.py  18
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/080_placeholder.py  18
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/081_add_endpoint_policy_table.py  54
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/082_add_federation_tables.py  97
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/083_add_oauth1_tables.py  75
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/084_add_revoke_tables.py  55
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/085_add_endpoint_filtering_table.py  70
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/086_add_duplicate_constraint_trusts.py  26
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/087_implied_roles.py  43
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/088_domain_specific_roles.py  60
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/089_add_root_of_all_domains.py  76
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/090_add_local_user_and_password_tables.py  42
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/091_migrate_data_to_local_user_and_password_tables.py  66
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/092_make_implied_roles_fks_cascaded.py  46
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/093_migrate_domains_to_projects.py  125
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/094_add_federated_user_table.py  43
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_table.py  62
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/096_drop_role_name_constraint.py  50
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/__init__.py  0
-rw-r--r--  keystone-moon/keystone/common/sql/migration_helpers.py  245
-rw-r--r--  keystone-moon/keystone/common/tokenless_auth.py  192
-rw-r--r--  keystone-moon/keystone/common/utils.py  598
-rw-r--r--  keystone-moon/keystone/common/validation/__init__.py  96
-rw-r--r--  keystone-moon/keystone/common/validation/parameter_types.py  70
-rw-r--r--  keystone-moon/keystone/common/validation/validators.py  58
-rw-r--r--  keystone-moon/keystone/common/wsgi.py  834
77 files changed, 0 insertions, 12455 deletions
diff --git a/keystone-moon/keystone/common/__init__.py b/keystone-moon/keystone/common/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/keystone-moon/keystone/common/__init__.py
+++ /dev/null
diff --git a/keystone-moon/keystone/common/authorization.py b/keystone-moon/keystone/common/authorization.py
deleted file mode 100644
index 414b9525..00000000
--- a/keystone-moon/keystone/common/authorization.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# Copyright 2011 - 2012 Justin Santa Barbara
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log
-
-from keystone import exception
-from keystone.i18n import _, _LW
-from keystone.models import token_model
-
-
-AUTH_CONTEXT_ENV = 'KEYSTONE_AUTH_CONTEXT'
-"""Environment variable used to convey the Keystone auth context.
-
-Auth context is essentially the user credential used for policy enforcement.
-It is a dictionary with the following attributes:
-
-* ``token``: token from the request
-* ``user_id``: user ID of the principal
-* ``user_domain_id`` (optional): domain ID of the principal if the principal
- has a domain
-* ``project_id`` (optional): project ID of the scoped project if auth is
- project-scoped
-* ``project_domain_id`` (optional): domain ID of the scoped project if auth is
- project-scoped
-* ``domain_id`` (optional): domain ID of the scoped domain if auth is
- domain-scoped
-* ``domain_name`` (optional): domain name of the scoped domain if auth is
- domain-scoped
-* ``is_delegated_auth``: True if the auth is delegated (via trust or oauth)
-* ``trust_id``: trust ID if trust-scoped, or None
-* ``trustor_id``: trustor ID if trust-scoped, or None
-* ``trustee_id``: trustee ID if trust-scoped, or None
-* ``consumer_id``: OAuth consumer ID, or None
-* ``access_token_id``: OAuth access token ID, or None
-* ``roles`` (optional): list of role names for the given scope
-* ``group_ids`` (optional): list of group IDs of which the API user is a
- member if the token was for a federated user
-
-"""
-
-LOG = log.getLogger(__name__)
-
-
-def token_to_auth_context(token):
- if not isinstance(token, token_model.KeystoneToken):
- raise exception.UnexpectedError(_('token reference must be a '
- 'KeystoneToken type, got: %s') %
- type(token))
- auth_context = {'token': token,
- 'is_delegated_auth': False}
- try:
- auth_context['user_id'] = token.user_id
- except KeyError:
- LOG.warning(_LW('RBAC: Invalid user data in token'))
- raise exception.Unauthorized()
- auth_context['user_domain_id'] = token.user_domain_id
-
- if token.project_scoped:
- auth_context['project_id'] = token.project_id
- auth_context['project_domain_id'] = token.project_domain_id
- elif token.domain_scoped:
- auth_context['domain_id'] = token.domain_id
- auth_context['domain_name'] = token.domain_name
- else:
- LOG.debug('RBAC: Proceeding without project or domain scope')
-
- if token.trust_scoped:
- auth_context['is_delegated_auth'] = True
- auth_context['trust_id'] = token.trust_id
- auth_context['trustor_id'] = token.trustor_user_id
- auth_context['trustee_id'] = token.trustee_user_id
- else:
- # NOTE(lbragstad): These variables will already be set to None but we
- # add the else statement here for readability.
- auth_context['trust_id'] = None
- auth_context['trustor_id'] = None
- auth_context['trustee_id'] = None
-
- roles = token.role_names
- if roles:
- auth_context['roles'] = roles
-
- if token.oauth_scoped:
- auth_context['is_delegated_auth'] = True
- auth_context['consumer_id'] = token.oauth_consumer_id
- auth_context['access_token_id'] = token.oauth_access_token_id
- else:
- # NOTE(lbragstad): These variables will already be set to None but we
- # add the else statement here for readability.
- auth_context['consumer_id'] = None
- auth_context['access_token_id'] = None
-
- if token.is_federated_user:
- auth_context['group_ids'] = token.federation_group_ids
-
- return auth_context
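For orientation, a minimal sketch of the auth context dict that token_to_auth_context() builds for a project-scoped, non-delegated token; every value below is illustrative, and the token string stands in for the real KeystoneToken object:

    auth_context = {
        'token': '<KeystoneToken>',   # stand-in for the real token object
        'user_id': 'u123',
        'user_domain_id': 'default',
        'project_id': 'p456',         # present because auth is project-scoped
        'project_domain_id': 'default',
        'is_delegated_auth': False,   # neither trust- nor oauth-delegated
        'trust_id': None,
        'trustor_id': None,
        'trustee_id': None,
        'consumer_id': None,
        'access_token_id': None,
        'roles': ['admin'],           # role names for the given scope
    }
    assert 'domain_id' not in auth_context  # only set for domain-scoped auth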
diff --git a/keystone-moon/keystone/common/base64utils.py b/keystone-moon/keystone/common/base64utils.py
deleted file mode 100644
index d19eade7..00000000
--- a/keystone-moon/keystone/common/base64utils.py
+++ /dev/null
@@ -1,401 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-
-Python provides the base64 module as a core module, but it is mostly
-limited to encoding and decoding base64 and its variants. It is often
-useful to be able to perform other operations on base64 text. This
-module is meant to be used in conjunction with the core base64 module.
-
-Standardized base64 is defined in
-RFC-4648 "The Base16, Base32, and Base64 Data Encodings".
-
-This module provides the following base64 utility functionality:
-
- * test if text is valid base64
- * filter formatting from base64
- * convert base64 between different alphabets
- * handle padding issues
- - test if base64 is padded
- - remove padding
- - restore padding
- * wrap base64 text into formatted blocks
- - via iterator
- - return a formatted string
-
-"""
-
-import re
-import string
-
-import six
-from six.moves import urllib
-
-from keystone.i18n import _
-
-
-class InvalidBase64Error(ValueError):
- pass
-
-base64_alphabet_re = re.compile(r'^[A-Za-z0-9+/=]+$')
-base64url_alphabet_re = re.compile(r'^[A-Za-z0-9---_=]+$')
-
-base64_non_alphabet_re = re.compile(r'[^A-Za-z0-9+/=]+')
-base64url_non_alphabet_re = re.compile(r'[^A-Za-z0-9---_=]+')
-
-_strip_formatting_re = re.compile(r'\s+')
-
-if six.PY2:
- str_ = string
-else:
- str_ = str
-
-_base64_to_base64url_trans = str_.maketrans('+/', '-_')
-_base64url_to_base64_trans = str_.maketrans('-_', '+/')
-
-
-def _check_padding_length(pad):
- if len(pad) != 1:
- raise ValueError(_('pad must be single character'))
-
-
-def is_valid_base64(text):
- """Test if input text can be base64 decoded.
-
- :param text: input base64 text
- :type text: string
- :returns: bool -- True if text can be decoded as base64, False otherwise
- """
-
- text = filter_formatting(text)
-
- if base64_non_alphabet_re.search(text):
- return False
-
- try:
- return base64_is_padded(text)
- except InvalidBase64Error:
- return False
-
-
-def is_valid_base64url(text):
- """Test if input text can be base64url decoded.
-
- :param text: input base64 text
- :type text: string
- :returns: bool -- True if text can be decoded as base64url,
- False otherwise
- """
-
- text = filter_formatting(text)
-
- if base64url_non_alphabet_re.search(text):
- return False
-
- try:
- return base64_is_padded(text)
- except InvalidBase64Error:
- return False
-
-
-def filter_formatting(text):
- """Return base64 text without any formatting, just the base64.
-
- Base64 text is often formatted with whitespace, line endings,
- etc. This function strips out any formatting; the result will
- contain only base64 characters.
-
- Note that this function does not filter out all non-base64 alphabet
- characters; it only removes characters used for formatting.
-
- :param text: input text to filter
- :type text: string
- :returns: string -- filtered text without formatting
- """
- return _strip_formatting_re.sub('', text)
-
-
-def base64_to_base64url(text):
- """Convert base64 text to base64url text.
-
- base64url text is designed to be safe for use in file names and
- URLs. It is defined in RFC-4648 Section 5.
-
- base64url differs from base64 in the last two alphabet characters
- at index 62 and 63; these are sometimes referred to as the
- altchars. The '+' character at index 62 is replaced by '-'
- (hyphen) and the '/' character at index 63 is replaced by '_'
- (underscore).
-
- This function only translates the altchars; non-alphabet
- characters are not filtered out.
-
- WARNING::
-
- base64url continues to use the '=' pad character which is NOT URL
- safe. RFC-4648 suggests two alternate methods to deal with this:
-
- percent-encode
- percent-encode the pad character (e.g. '=' becomes
- '%3D'). This makes the base64url text fully safe. But
- percent-encoding has the downside of requiring
- percent-decoding prior to feeding the base64url text into a
- base64url decoder since most base64url decoders do not
- recognize %3D as a pad character and most decoders require
- correct padding.
-
- no-padding
- padding is not strictly necessary to decode base64 or
- base64url text; the pad can be computed from the input text
- length. However, many decoders demand padding and will consider
- non-padded text to be malformed. If one wants to omit the
- trailing pad character(s) for use in URLs, it can be added back
- using the base64_assure_padding() function.
-
- This function makes no decisions about which padding methodology to
- use. One can either call base64_strip_padding() to remove any pad
- characters (restoring later with base64_assure_padding()) or call
- base64url_percent_encode() to percent-encode the pad characters.
-
- :param text: input base64 text
- :type text: string
- :returns: string -- base64url text
- """
- return text.translate(_base64_to_base64url_trans)
-
-
-def base64url_to_base64(text):
- """Convert base64url text to base64 text.
-
- See base64_to_base64url() for a description of base64url text and
- its issues.
-
- This function does NOT handle percent-encoded pad characters; they
- will be left intact. If the input base64url text is
- percent-encoded, you should call base64url_percent_decode() first.
-
- :param text: text in base64url alphabet
- :type text: string
- :returns: string -- text in base64 alphabet
-
- """
- return text.translate(_base64url_to_base64_trans)
-
-
-def base64_is_padded(text, pad='='):
- """Test if the text is base64 padded.
-
- The input text must be in a base64 alphabet. The pad must be a
- single character. If the text has been percent-encoded (e.g. pad
- is the string '%3D') you must convert the text back to a base64
- alphabet (e.g. if percent-encoded use the function
- base64url_percent_decode()).
-
- :param text: text containing ONLY characters in a base64 alphabet
- :type text: string
- :param pad: pad character (must be single character) (default: '=')
- :type pad: string
- :returns: bool -- True if padded, False otherwise
- :raises: ValueError, InvalidBase64Error
- """
-
- _check_padding_length(pad)
-
- text_len = len(text)
- if text_len > 0 and text_len % 4 == 0:
- pad_index = text.find(pad)
- if pad_index >= 0 and pad_index < text_len - 2:
- raise InvalidBase64Error(_('text is a multiple of 4, '
- 'but pad "%s" occurs before '
- '2nd to last char') % pad)
- if pad_index == text_len - 2 and text[-1] != pad:
- raise InvalidBase64Error(_('text is a multiple of 4, '
- 'but pad "%s" occurs before '
- 'non-pad last char') % pad)
- return True
-
- if text.find(pad) >= 0:
- raise InvalidBase64Error(_('text is not a multiple of 4, '
- 'but contains pad "%s"') % pad)
- return False
-
-
-def base64url_percent_encode(text):
- """Percent-encode base64url padding.
-
- The input text should only contain base64url alphabet
- characters. Any non-base64url alphabet characters will also be
- subject to percent-encoding.
-
- :param text: text containing ONLY characters in the base64url alphabet
- :type text: string
- :returns: string -- percent-encoded base64url text
- :raises: InvalidBase64Error
- """
-
- if len(text) % 4 != 0:
- raise InvalidBase64Error(_('padded base64url text must be '
- 'multiple of 4 characters'))
-
- return urllib.parse.quote(text)
-
-
-def base64url_percent_decode(text):
- """Percent-decode base64url padding.
-
- The input text should only contain base64url alphabet
- characters and the percent-encoded pad character. Any other
- percent-encoded characters will be subject to percent-decoding.
-
- :param text: base64url alphabet text
- :type text: string
- :returns: string -- percent-decoded base64url text
- """
-
- decoded_text = urllib.parse.unquote(text)
-
- if len(decoded_text) % 4 != 0:
- raise InvalidBase64Error(_('padded base64url text must be '
- 'multiple of 4 characters'))
-
- return decoded_text
-
-
-def base64_strip_padding(text, pad='='):
- """Remove padding from input base64 text.
-
- :param text: text containing ONLY characters in a base64 alphabet
- :type text: string
- :param pad: pad character (must be single character) (default: '=')
- :type pad: string
- :returns: string -- base64 text without padding
- :raises: ValueError
- """
- _check_padding_length(pad)
-
- # Can't be padded if text is less than 4 characters.
- if len(text) < 4:
- return text
-
- if text[-1] == pad:
- if text[-2] == pad:
- return text[0:-2]
- else:
- return text[0:-1]
- else:
- return text
-
-
-def base64_assure_padding(text, pad='='):
- """Assure the input text ends with padding.
-
- Base64 text is normally expected to be a multiple of 4
- characters. Each 4 character base64 sequence produces 3 octets of
- binary data. If the binary data is not a multiple of 3 the base64
- text is padded at the end with a pad character such that it is
- always a multiple of 4. Padding is ignored and does not alter the
- binary data nor its length.
-
- In some circumstances it is desirable to omit the padding
- character due to transport encoding conflicts. Base64 text can
- still be correctly decoded if the length of the base64 text
- (consisting only of characters in the desired base64 alphabet) is
- known; padding is not absolutely necessary.
-
- Some base64 decoders demand correct padding, and one may wish to
- format RFC-compliant base64; this function performs that action.
-
- Input is assumed to consist only of members of a base64
- alphabet (i.e. no whitespace).
-
- Use the filter_formatting() function to assure the input text
- contains only the members of the alphabet.
-
- If the text ends with the pad it is assumed to already be
- padded. Otherwise the binary length is computed from the input
- text length and correct number of pad characters are appended.
-
- :param text: text containing ONLY characters in a base64 alphabet
- :type text: string
- :param pad: pad character (must be single character) (default: '=')
- :type pad: string
- :returns: string -- input base64 text with padding
- :raises: ValueError
- """
- _check_padding_length(pad)
-
- if text.endswith(pad):
- return text
-
- n = len(text) % 4
- if n == 0:
- return text
-
- n = 4 - n
- padding = pad * n
- return text + padding
-
-
-def base64_wrap_iter(text, width=64):
- """Fold text into lines of text with max line length.
-
- Input is assumed to consist only of members of a base64
- alphabet (i.e. no whitespace). Iteration yields a sequence of lines.
- The line does NOT terminate with a line ending.
-
- Use the filter_formatting() function to assure the input text
- contains only the members of the alphabet.
-
- :param text: text containing ONLY characters in a base64 alphabet
- :type text: string
- :param width: number of characters in each wrapped line (default: 64)
- :type width: int
- :returns: generator -- sequence of lines of base64 text.
- """
-
- text = six.text_type(text)
- for x in six.moves.range(0, len(text), width):
- yield text[x:x + width]
-
-
-def base64_wrap(text, width=64):
- """Fold text into lines of text with max line length.
-
- Input is assumed to consist only of members of a base64
- alphabet (i.e. no whitespace). Fold the text into lines whose
- line length is width chars long, terminating each line with a
- line ending (default is '\\n'). Return the wrapped text as a single
- string.
-
- Use the filter_formatting() function to assure the input text
- contains only the members of the alphabet.
-
- :param text: text containing ONLY characters in a base64 alphabet
- :type text: string
- :param width: number of characters in each wrapped line (default: 64)
- :type width: int
- :returns: string -- wrapped text.
- """
-
- buf = six.StringIO()
-
- for line in base64_wrap_iter(text, width):
- buf.write(line)
- buf.write(u'\n')
-
- text = buf.getvalue()
- buf.close()
- return text
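A stdlib-only sketch of the strip/restore round trip that base64_strip_padding() and base64_assure_padding() implement; the payload bytes are illustrative:

    import base64

    encoded = base64.urlsafe_b64encode(b'some binary payload').decode()
    stripped = encoded.rstrip('=')                    # drop padding for URL use
    restored = stripped + '=' * (-len(stripped) % 4)  # recompute the padding
    assert restored == encoded
    assert base64.urlsafe_b64decode(restored) == b'some binary payload'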
diff --git a/keystone-moon/keystone/common/cache/__init__.py b/keystone-moon/keystone/common/cache/__init__.py
deleted file mode 100644
index 49502399..00000000
--- a/keystone-moon/keystone/common/cache/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2013 Metacloud
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from keystone.common.cache.core import * # noqa
diff --git a/keystone-moon/keystone/common/cache/_context_cache.py b/keystone-moon/keystone/common/cache/_context_cache.py
deleted file mode 100644
index 3895ca1f..00000000
--- a/keystone-moon/keystone/common/cache/_context_cache.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""A dogpile.cache proxy that caches objects in the request local cache."""
-from dogpile.cache import api
-from dogpile.cache import proxy
-from oslo_context import context as oslo_context
-from oslo_serialization import msgpackutils
-
-from keystone.models import revoke_model
-
-
-class _RevokeModelHandler(object):
- # NOTE(morganfainberg): There needs to be reserved "registry" entries set
- # in oslo_serialization for application-specific handlers. We picked 127
- # here since it's waaaaaay far out before oslo_serialization will use it.
- identity = 127
- handles = (revoke_model.RevokeTree,)
-
- def __init__(self, registry):
- self._registry = registry
-
- def serialize(self, obj):
- return msgpackutils.dumps(obj.revoke_map,
- registry=self._registry)
-
- def deserialize(self, data):
- revoke_map = msgpackutils.loads(data, registry=self._registry)
- revoke_tree = revoke_model.RevokeTree()
- revoke_tree.revoke_map = revoke_map
- return revoke_tree
-
-
-# Register our new handler.
-_registry = msgpackutils.default_registry
-_registry.frozen = False
-_registry.register(_RevokeModelHandler(registry=_registry))
-_registry.frozen = True
-
-
-class _ResponseCacheProxy(proxy.ProxyBackend):
-
- __key_pfx = '_request_cache_%s'
-
- def _get_request_context(self):
- # Return the current context or a new/empty context.
- return oslo_context.get_current() or oslo_context.RequestContext()
-
- def _get_request_key(self, key):
- return self.__key_pfx % key
-
- def _set_local_cache(self, key, value, ctx=None):
- # Set a serialized version of the returned value in local cache for
- # subsequent calls to the memoized method.
- if not ctx:
- ctx = self._get_request_context()
- serialize = {'payload': value.payload, 'metadata': value.metadata}
- setattr(ctx, self._get_request_key(key), msgpackutils.dumps(serialize))
- ctx.update_store()
-
- def _get_local_cache(self, key):
- # Return the version from our local request cache if it exists.
- ctx = self._get_request_context()
- try:
- value = getattr(ctx, self._get_request_key(key))
- except AttributeError:
- return api.NO_VALUE
-
- value = msgpackutils.loads(value)
- return api.CachedValue(payload=value['payload'],
- metadata=value['metadata'])
-
- def _delete_local_cache(self, key):
- # On invalidate/delete remove the value from the local request cache
- ctx = self._get_request_context()
- try:
- delattr(ctx, self._get_request_key(key))
- ctx.update_store()
- except AttributeError: # nosec
- # NOTE(morganfainberg): We will simply pass here, this value has
- # not been cached locally in the request.
- pass
-
- def get(self, key):
- value = self._get_local_cache(key)
- if value is api.NO_VALUE:
- value = self.proxied.get(key)
- if value is not api.NO_VALUE:
- self._set_local_cache(key, value)
- return value
-
- def set(self, key, value):
- self._set_local_cache(key, value)
- self.proxied.set(key, value)
-
- def delete(self, key):
- self._delete_local_cache(key)
- self.proxied.delete(key)
-
- def get_multi(self, keys):
- values = {}
- for key in keys:
- v = self._get_local_cache(key)
- if v is not api.NO_VALUE:
- values[key] = v
- query_keys = set(keys).difference(set(values.keys()))
- values.update(dict(
- zip(query_keys, self.proxied.get_multi(query_keys))))
- return [values[k] for k in keys]
-
- def set_multi(self, mapping):
- ctx = self._get_request_context()
- for k, v in mapping.items():
- self._set_local_cache(k, v, ctx)
- self.proxied.set_multi(mapping)
-
- def delete_multi(self, keys):
- for k in keys:
- self._delete_local_cache(k)
- self.proxied.delete_multi(keys)
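The file above is an instance of dogpile.cache's proxy-backend pattern; a self-contained sketch of that pattern reduced to a read counter, with an illustrative in-memory backend and names:

    from dogpile.cache import make_region
    from dogpile.cache.proxy import ProxyBackend

    class CountingProxy(ProxyBackend):
        hits = 0

        def get(self, key):
            CountingProxy.hits += 1       # observe every backend read
            return self.proxied.get(key)  # then delegate unchanged

    region = make_region().configure('dogpile.cache.memory',
                                     wrap=[CountingProxy])
    region.set('k', 'v')
    assert region.get('k') == 'v' and CountingProxy.hits >= 1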
diff --git a/keystone-moon/keystone/common/cache/_memcache_pool.py b/keystone-moon/keystone/common/cache/_memcache_pool.py
deleted file mode 100644
index 2bfcc3bb..00000000
--- a/keystone-moon/keystone/common/cache/_memcache_pool.py
+++ /dev/null
@@ -1,244 +0,0 @@
-# Copyright 2014 Mirantis Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Thread-safe connection pool for python-memcached."""
-
-# NOTE(yorik-sar): this file is copied between keystone and keystonemiddleware
-# and should be kept in sync until we can use external library for this.
-
-import collections
-import contextlib
-import itertools
-import logging
-import threading
-import time
-
-import memcache
-from oslo_log import log
-from six.moves import queue, zip
-
-from keystone import exception
-from keystone.i18n import _
-
-
-LOG = log.getLogger(__name__)
-
-
-class _MemcacheClient(memcache.Client):
- """Thread global memcache client
-
- Because memcache.Client inherits from threading.local, we have to
- restore the object methods overridden by threading.local so that
- clients can be reused across threads.
- """
- __delattr__ = object.__delattr__
- __getattribute__ = object.__getattribute__
- __new__ = object.__new__
- __setattr__ = object.__setattr__
-
- def __del__(self):
- pass
-
-
-_PoolItem = collections.namedtuple('_PoolItem', ['ttl', 'connection'])
-
-
-class ConnectionPool(queue.Queue):
- """Base connection pool class
-
- This class implements the basic connection pool logic as an abstract base
- class.
- """
- def __init__(self, maxsize, unused_timeout, conn_get_timeout=None):
- """Initialize the connection pool.
-
- :param maxsize: maximum number of client connections for the pool
- :type maxsize: int
- :param unused_timeout: idle time to live for unused clients (in
- seconds). If a client connection object has been
- in the pool and idle for longer than the
- unused_timeout, it will be reaped. This is to
- ensure resources are released as utilization
- goes down.
- :type unused_timeout: int
- :param conn_get_timeout: maximum time in seconds to wait for a
- connection. If set to `None` timeout is
- indefinite.
- :type conn_get_timeout: int
- """
- # super() cannot be used here because Queue in stdlib is an
- # old-style class
- queue.Queue.__init__(self, maxsize)
- self._unused_timeout = unused_timeout
- self._connection_get_timeout = conn_get_timeout
- self._acquired = 0
-
- def _create_connection(self):
- """Returns a connection instance.
-
- This is called when the pool needs another instance created.
-
- :returns: a new connection instance
-
- """
- raise NotImplementedError
-
- def _destroy_connection(self, conn):
- """Destroy and cleanup a connection instance.
-
- This is called when the pool wishes to get rid of an existing
- connection. This is the opportunity for a subclass to free up
- resources and clean up after itself.
-
- :param conn: the connection object to destroy
-
- """
- raise NotImplementedError
-
- def _debug_logger(self, msg, *args, **kwargs):
- if LOG.isEnabledFor(logging.DEBUG):
- thread_id = threading.current_thread().ident
- args = (id(self), thread_id) + args
- prefix = 'Memcached pool %s, thread %s: '
- LOG.debug(prefix + msg, *args, **kwargs)
-
- @contextlib.contextmanager
- def acquire(self):
- self._debug_logger('Acquiring connection')
- try:
- conn = self.get(timeout=self._connection_get_timeout)
- except queue.Empty:
- raise exception.UnexpectedError(
- _('Unable to get a connection from pool id %(id)s after '
- '%(seconds)s seconds.') %
- {'id': id(self), 'seconds': self._connection_get_timeout})
- self._debug_logger('Acquired connection %s', id(conn))
- try:
- yield conn
- finally:
- self._debug_logger('Releasing connection %s', id(conn))
- self._drop_expired_connections()
- try:
- # super() cannot be used here because Queue in stdlib is an
- # old-style class
- queue.Queue.put(self, conn, block=False)
- except queue.Full:
- self._debug_logger('Reaping surplus connection %s', id(conn))
- self._destroy_connection(conn)
-
- def _qsize(self):
- if self.maxsize:
- return self.maxsize - self._acquired
- else:
- # A value indicating there is always a free connection
- # if maxsize is None or 0
- return 1
-
- # NOTE(dstanek): stdlib and eventlet Queue implementations
- # have different names for the qsize method. This ensures
- # that we override both of them.
- if not hasattr(queue.Queue, '_qsize'):
- qsize = _qsize
-
- def _get(self):
- if self.queue:
- conn = self.queue.pop().connection
- else:
- conn = self._create_connection()
- self._acquired += 1
- return conn
-
- def _drop_expired_connections(self):
- """Drop all expired connections from the right end of the queue."""
- now = time.time()
- while self.queue and self.queue[0].ttl < now:
- conn = self.queue.popleft().connection
- self._debug_logger('Reaping connection %s', id(conn))
- self._destroy_connection(conn)
-
- def _put(self, conn):
- self.queue.append(_PoolItem(
- ttl=time.time() + self._unused_timeout,
- connection=conn,
- ))
- self._acquired -= 1
-
-
-class MemcacheClientPool(ConnectionPool):
- def __init__(self, urls, arguments, **kwargs):
- # super() cannot be used here because Queue in stdlib is an
- # old-style class
- ConnectionPool.__init__(self, **kwargs)
- self.urls = urls
- self._arguments = arguments
- # NOTE(morganfainberg): The host objects expect an int for the
- # deaduntil value. Initialize this at 0 for each host with 0 indicating
- # the host is not dead.
- self._hosts_deaduntil = [0] * len(urls)
-
- def _create_connection(self):
- return _MemcacheClient(self.urls, **self._arguments)
-
- def _destroy_connection(self, conn):
- conn.disconnect_all()
-
- def _get(self):
- # super() cannot be used here because Queue in stdlib is an
- # old-style class
- conn = ConnectionPool._get(self)
- try:
- # Propagate host state known to us to this client's list
- now = time.time()
- for deaduntil, host in zip(self._hosts_deaduntil, conn.servers):
- if deaduntil > now and host.deaduntil <= now:
- host.mark_dead('propagating death mark from the pool')
- host.deaduntil = deaduntil
- except Exception:
- # We need to be sure that connection doesn't leak from the pool.
- # This code runs before we enter context manager's try-finally
- # block, so we need to explicitly release it here.
- # super() cannot be used here because Queue in stdlib is an
- # old-style class
- ConnectionPool._put(self, conn)
- raise
- return conn
-
- def _put(self, conn):
- try:
- # If this client found that one of the hosts is dead, mark it as
- # such in our internal list
- now = time.time()
- for i, host in zip(itertools.count(), conn.servers):
- deaduntil = self._hosts_deaduntil[i]
- # Do nothing if we already know this host is dead
- if deaduntil <= now:
- if host.deaduntil > now:
- self._hosts_deaduntil[i] = host.deaduntil
- self._debug_logger(
- 'Marked host %s dead until %s',
- self.urls[i], host.deaduntil)
- else:
- self._hosts_deaduntil[i] = 0
- # If all hosts are dead we should forget that they're dead. This
- # way we won't stay completely shut off until dead_retry seconds
- # pass, but will check the servers as frequently as we can (over
- # the much smaller socket_timeout).
- if all(deaduntil > now for deaduntil in self._hosts_deaduntil):
- self._debug_logger('All hosts are dead. Marking them as live.')
- self._hosts_deaduntil[:] = [0] * len(self._hosts_deaduntil)
- finally:
- # super() cannot be used here because Queue in stdlib is an
- # old-style class
- ConnectionPool._put(self, conn)
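The acquire/release shape used above, reduced to a stdlib-only sketch without the TTL and dead-host bookkeeping; make_conn is an illustrative connection factory:

    import contextlib
    import queue

    class TinyPool:
        def __init__(self, make_conn, maxsize=4):
            self._make_conn = make_conn
            self._idle = queue.Queue(maxsize)

        @contextlib.contextmanager
        def acquire(self):
            try:
                conn = self._idle.get_nowait()   # reuse an idle connection
            except queue.Empty:
                conn = self._make_conn()         # or lazily create one
            try:
                yield conn
            finally:
                try:
                    self._idle.put_nowait(conn)  # return it to the pool
                except queue.Full:
                    pass                         # surplus connection dropped

    pool = TinyPool(make_conn=lambda: object())
    with pool.acquire() as conn:
        pass  # use the connection here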
diff --git a/keystone-moon/keystone/common/cache/backends/__init__.py b/keystone-moon/keystone/common/cache/backends/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/keystone-moon/keystone/common/cache/backends/__init__.py
+++ /dev/null
diff --git a/keystone-moon/keystone/common/cache/backends/memcache_pool.py b/keystone-moon/keystone/common/cache/backends/memcache_pool.py
deleted file mode 100644
index bbe4785a..00000000
--- a/keystone-moon/keystone/common/cache/backends/memcache_pool.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2014 Mirantis Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""This module is deprecated."""
-
-from oslo_cache.backends import memcache_pool
-from oslo_log import versionutils
-
-
-@versionutils.deprecated(
- versionutils.deprecated.MITAKA,
- what='keystone.cache.memcache_pool backend',
- in_favor_of='oslo_cache.memcache_pool backend',
- remove_in=+1)
-class PooledMemcachedBackend(memcache_pool.PooledMemcachedBackend):
- pass
diff --git a/keystone-moon/keystone/common/cache/backends/mongo.py b/keystone-moon/keystone/common/cache/backends/mongo.py
deleted file mode 100644
index 861aefed..00000000
--- a/keystone-moon/keystone/common/cache/backends/mongo.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright 2014 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_cache.backends import mongo
-from oslo_log import versionutils
-
-
-@versionutils.deprecated(
- versionutils.deprecated.MITAKA,
- what='keystone.cache.mongo backend',
- in_favor_of='oslo_cache.mongo backend',
- remove_in=+1)
-class MongoCacheBackend(mongo.MongoCacheBackend):
- pass
diff --git a/keystone-moon/keystone/common/cache/backends/noop.py b/keystone-moon/keystone/common/cache/backends/noop.py
deleted file mode 100644
index eda06ec8..00000000
--- a/keystone-moon/keystone/common/cache/backends/noop.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright 2013 Metacloud
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from dogpile.cache import api
-from oslo_log import versionutils
-
-
-NO_VALUE = api.NO_VALUE
-
-
-@versionutils.deprecated(
- versionutils.deprecated.MITAKA,
- what='keystone.common.cache.noop backend',
- in_favor_of="dogpile.cache's Null backend",
- remove_in=+1)
-class NoopCacheBackend(api.CacheBackend):
- """A no op backend as a default caching backend.
-
- The no op backend is provided as the default caching backend for keystone
- to ensure that ``dogpile.cache.memory`` is not used in any real-world
- circumstances unintentionally. ``dogpile.cache.memory`` does not have a
- mechanism to clean up its internal dict and therefore could cause runaway
- memory utilization.
- """
-
- def __init__(self, *args):
- return
-
- def get(self, key):
- return NO_VALUE
-
- def get_multi(self, keys):
- return [NO_VALUE for x in keys]
-
- def set(self, key, value):
- return
-
- def set_multi(self, mapping):
- return
-
- def delete(self, key):
- return
-
- def delete_multi(self, keys):
- return
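The deprecation above points at dogpile.cache's Null backend as the replacement; a minimal configuration sketch:

    from dogpile.cache import make_region
    from dogpile.cache.api import NO_VALUE

    region = make_region().configure('dogpile.cache.null')
    region.set('key', 'value')            # silently discarded
    assert region.get('key') is NO_VALUE  # nothing is ever cached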
diff --git a/keystone-moon/keystone/common/cache/core.py b/keystone-moon/keystone/common/cache/core.py
deleted file mode 100644
index 6bb0af51..00000000
--- a/keystone-moon/keystone/common/cache/core.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright 2013 Metacloud
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Keystone Caching Layer Implementation."""
-import dogpile.cache
-from dogpile.cache import api
-from oslo_cache import core as cache
-from oslo_config import cfg
-
-from keystone.common.cache import _context_cache
-
-
-CONF = cfg.CONF
-CACHE_REGION = cache.create_region()
-
-
-def configure_cache(region=None):
- if region is None:
- region = CACHE_REGION
- # NOTE(morganfainberg): running cache.configure_cache_region()
- # sets region.is_configured; this must be captured before
- # cache.configure_cache_region is called.
- configured = region.is_configured
- cache.configure_cache_region(CONF, region)
- # Only wrap the region if it was not configured. This should be pushed
- # to oslo_cache lib somehow.
- if not configured:
- region.wrap(_context_cache._ResponseCacheProxy)
-
-
-def get_memoization_decorator(group, expiration_group=None, region=None):
- if region is None:
- region = CACHE_REGION
- return cache.get_memoization_decorator(CONF, region, group,
- expiration_group=expiration_group)
-
-
-# NOTE(stevemar): When memcache_pool, mongo and noop backends are removed
-# we no longer need to register the backends here.
-dogpile.cache.register_backend(
- 'keystone.common.cache.noop',
- 'keystone.common.cache.backends.noop',
- 'NoopCacheBackend')
-
-dogpile.cache.register_backend(
- 'keystone.cache.mongo',
- 'keystone.common.cache.backends.mongo',
- 'MongoCacheBackend')
-
-dogpile.cache.register_backend(
- 'keystone.cache.memcache_pool',
- 'keystone.common.cache.backends.memcache_pool',
- 'PooledMemcachedBackend')
-
-
-# TODO(morganfainberg): Move this logic up into oslo.cache directly
-# so we can handle region-wide invalidations or alternatively propose
-# a fix to dogpile.cache to make region-wide invalidates possible to
-# work across distributed processes.
-class _RegionInvalidator(object):
-
- def __init__(self, region, region_name):
- self.region = region
- self.region_name = region_name
- region_key = '_RegionExpiration.%(type)s.%(region_name)s'
- self.soft_region_key = region_key % {'type': 'soft',
- 'region_name': self.region_name}
- self.hard_region_key = region_key % {'type': 'hard',
- 'region_name': self.region_name}
-
- @property
- def hard_invalidated(self):
- invalidated = self.region.backend.get(self.hard_region_key)
- if invalidated is not api.NO_VALUE:
- return invalidated.payload
- return None
-
- @hard_invalidated.setter
- def hard_invalidated(self, value):
- self.region.set(self.hard_region_key, value)
-
- @hard_invalidated.deleter
- def hard_invalidated(self):
- self.region.delete(self.hard_region_key)
-
- @property
- def soft_invalidated(self):
- invalidated = self.region.backend.get(self.soft_region_key)
- if invalidated is not api.NO_VALUE:
- return invalidated.payload
- return None
-
- @soft_invalidated.setter
- def soft_invalidated(self, value):
- self.region.set(self.soft_region_key, value)
-
- @soft_invalidated.deleter
- def soft_invalidated(self):
- self.region.delete(self.soft_region_key)
-
-
-def apply_invalidation_patch(region, region_name):
- """Patch the region interfaces to ensure we share the expiration time.
-
- This method is used to patch region._hard_invalidated and
- region._soft_invalidated.
- """
- # Patch the region object. This logic needs to be moved up into dogpile
- # itself. Patching the internal interfaces, unfortunately, is the only
- # way to handle this at the moment.
- invalidator = _RegionInvalidator(region=region, region_name=region_name)
- setattr(region, '_hard_invalidated', invalidator.hard_invalidated)
- setattr(region, '_soft_invalidated', invalidator.soft_invalidated)
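get_memoization_decorator() above delegates to oslo.cache, which builds on dogpile's cache_on_arguments; a self-contained sketch of the same memoization idea against an in-memory region, with an illustrative expiration time and function:

    from dogpile.cache import make_region

    region = make_region().configure('dogpile.cache.memory',
                                     expiration_time=600)

    @region.cache_on_arguments()
    def lookup(user_id):
        print('cache miss for', user_id)  # the body runs only on a miss
        return {'id': user_id}

    lookup('u1')   # miss: computes and caches the result
    lookup('u1')   # hit: served from the region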
diff --git a/keystone-moon/keystone/common/clean.py b/keystone-moon/keystone/common/clean.py
deleted file mode 100644
index 38564e0b..00000000
--- a/keystone-moon/keystone/common/clean.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import six
-
-from keystone import exception
-from keystone.i18n import _
-
-
-def check_length(property_name, value, min_length=1, max_length=64):
- if len(value) < min_length:
- if min_length == 1:
- msg = _("%s cannot be empty.") % property_name
- else:
- msg = (_("%(property_name)s cannot be less than "
- "%(min_length)s characters.") % dict(
- property_name=property_name, min_length=min_length))
- raise exception.ValidationError(msg)
- if len(value) > max_length:
- msg = (_("%(property_name)s should not be greater than "
- "%(max_length)s characters.") % dict(
- property_name=property_name, max_length=max_length))
-
- raise exception.ValidationError(msg)
-
-
-def check_type(property_name, value, expected_type, display_expected_type):
- if not isinstance(value, expected_type):
- msg = (_("%(property_name)s is not a "
- "%(display_expected_type)s") % dict(
- property_name=property_name,
- display_expected_type=display_expected_type))
- raise exception.ValidationError(msg)
-
-
-def check_enabled(property_name, enabled):
- # Allow int and its subclass bool
- check_type('%s enabled' % property_name, enabled, int, 'boolean')
- return bool(enabled)
-
-
-def check_name(property_name, name, min_length=1, max_length=64):
- check_type('%s name' % property_name, name, six.string_types,
- 'str or unicode')
- name = name.strip()
- check_length('%s name' % property_name, name,
- min_length=min_length, max_length=max_length)
- return name
-
-
-def domain_name(name):
- return check_name('Domain', name)
-
-
-def domain_enabled(enabled):
- return check_enabled('Domain', enabled)
-
-
-def project_name(name):
- return check_name('Project', name)
-
-
-def project_enabled(enabled):
- return check_enabled('Project', enabled)
-
-
-def user_name(name):
- return check_name('User', name, max_length=255)
-
-
-def user_enabled(enabled):
- return check_enabled('User', enabled)
-
-
-def group_name(name):
- return check_name('Group', name)
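A stdlib-only sketch of the normalization check_name() performs above, with a stand-in ValidationError (keystone's lives in keystone.exception):

    class ValidationError(ValueError):
        pass

    def check_name(property_name, name, min_length=1, max_length=64):
        if not isinstance(name, str):
            raise ValidationError('%s is not a string' % property_name)
        name = name.strip()              # surrounding whitespace is dropped
        if not min_length <= len(name) <= max_length:
            raise ValidationError('%s length out of range' % property_name)
        return name

    assert check_name('User name', '  alice  ') == 'alice'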
diff --git a/keystone-moon/keystone/common/config.py b/keystone-moon/keystone/common/config.py
deleted file mode 100644
index 56f419b6..00000000
--- a/keystone-moon/keystone/common/config.py
+++ /dev/null
@@ -1,1259 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-import os
-
-from oslo_cache import core as cache
-from oslo_config import cfg
-from oslo_log import log
-import oslo_messaging
-from oslo_middleware import cors
-import passlib.utils
-
-from keystone import exception
-
-
-_DEFAULT_AUTH_METHODS = ['external', 'password', 'token', 'oauth1']
-_CERTFILE = '/etc/keystone/ssl/certs/signing_cert.pem'
-_KEYFILE = '/etc/keystone/ssl/private/signing_key.pem'
-_SSO_CALLBACK = '/etc/keystone/sso_callback_template.html'
-
-_DEPRECATE_PKI_MSG = ('PKI token support has been deprecated in the M '
- 'release and will be removed in the O release. Fernet '
- 'or UUID tokens are recommended.')
-
-_DEPRECATE_INHERIT_MSG = ('The option to enable the OS-INHERIT extension has '
- 'been deprecated in the M release and will be '
- 'removed in the O release. The OS-INHERIT extension '
- 'will be enabled by default.')
-
-_DEPRECATE_EP_MSG = ('The option to enable the OS-ENDPOINT-POLICY extension '
- 'has been deprecated in the M release and will be '
- 'removed in the O release. The OS-ENDPOINT-POLICY '
- 'extension will be enabled by default.')
-
-
-FILE_OPTIONS = {
- None: [
- cfg.StrOpt('admin_token', secret=True, default=None,
- help='A "shared secret" that can be used to bootstrap '
- 'Keystone. This "token" does not represent a user, '
- 'and carries no explicit authorization. If set '
- 'to `None`, the value is ignored and the '
- '`admin_token` log in mechanism is effectively '
- 'disabled. To completely disable `admin_token` '
- 'in production (highly recommended), remove '
- 'AdminTokenAuthMiddleware from your paste '
- 'application pipelines (for example, in '
- 'keystone-paste.ini).'),
- cfg.StrOpt('public_endpoint',
- help='The base public endpoint URL for Keystone that is '
- 'advertised to clients (NOTE: this does NOT affect '
- 'how Keystone listens for connections). '
- 'Defaults to the base host URL of the request. E.g. a '
- 'request to http://server:5000/v3/users will '
- 'default to http://server:5000. You should only need '
- 'to set this value if the base URL contains a path '
- '(e.g. /prefix/v3) or the endpoint should be found '
- 'on a different server.'),
- cfg.StrOpt('admin_endpoint',
- help='The base admin endpoint URL for Keystone that is '
- 'advertised to clients (NOTE: this does NOT affect '
- 'how Keystone listens for connections). '
- 'Defaults to the base host URL of the request. E.g. a '
- 'request to http://server:35357/v3/users will '
- 'default to http://server:35357. You should only need '
- 'to set this value if the base URL contains a path '
- '(e.g. /prefix/v3) or the endpoint should be found '
- 'on a different server.'),
- cfg.IntOpt('max_project_tree_depth', default=5,
- help='Maximum depth of the project hierarchy, excluding '
- 'the project acting as a domain at the top of the '
- 'hierarchy. WARNING: setting it to a large value may '
- 'adversely impact performance.'),
- cfg.IntOpt('max_param_size', default=64,
- help='Limit the sizes of user & project ID/names.'),
- # we allow tokens to be a bit larger to accommodate PKI
- cfg.IntOpt('max_token_size', default=8192,
- help='Similar to max_param_size, but provides an '
- 'exception for token values.'),
- cfg.StrOpt('member_role_id',
- default='9fe2ff9ee4384b1894a90878d3e92bab',
- help='Similar to the member_role_name option, this '
- 'represents the default role ID used to associate '
- 'users with their default projects in the v2 API. '
- 'This will be used as the explicit role where one is '
- 'not specified by the v2 API.'),
- cfg.StrOpt('member_role_name', default='_member_',
- help='This is the role name used in combination with the '
- 'member_role_id option; see that option for more '
- 'detail.'),
- # NOTE(lbragstad/morganfainberg): This value of 10k was
- # measured as having an approximate 30% clock-time savings
- # over the old default of 40k. The passlib default is not
- # static and grows over time to constantly approximate ~300ms
- # of CPU time to hash; this was considered too high. This
- # value still exceeds the glibc default of 5k.
- cfg.IntOpt('crypt_strength', default=10000, min=1000, max=100000,
- help='The value passed as the keyword "rounds" to '
- 'passlib\'s encrypt method.'),
- cfg.IntOpt('list_limit',
- help='The maximum number of entities that will be '
- 'returned in a collection, with no limit set by '
- 'default. This global limit may be then overridden '
- 'for a specific driver, by specifying a list_limit '
- 'in the appropriate section (e.g. [assignment]).'),
- cfg.BoolOpt('domain_id_immutable', default=True,
- help='Set this to false if you want to enable the '
- 'ability for user, group and project entities '
- 'to be moved between domains by updating their '
- 'domain_id. Allowing such movement is not '
- 'recommended if the scope of a domain admin is being '
- 'restricted by use of an appropriate policy file '
- '(see policy.v3cloudsample as an example). This '
- 'ability is deprecated and will be removed in a '
- 'future release.',
- deprecated_for_removal=True),
- cfg.BoolOpt('strict_password_check', default=False,
- help='If set to true, strict password length checking is '
- 'performed for password manipulation. If a password '
- 'exceeds the maximum length, the operation will fail '
- 'with an HTTP 403 Forbidden error. If set to false, '
- 'passwords are automatically truncated to the '
- 'maximum length.'),
- cfg.StrOpt('secure_proxy_ssl_header', default='HTTP_X_FORWARDED_PROTO',
- help='The HTTP header used to determine the scheme for the '
- 'original request, even if it was removed by an SSL '
- 'terminating proxy.'),
- cfg.BoolOpt('insecure_debug', default=False,
- help='If set to true the server will return information '
- 'in the response that may allow an unauthenticated '
- 'or authenticated user to get more information than '
- 'normal, such as why authentication failed. This may '
- 'be useful for debugging but is insecure.'),
- ],
- 'identity': [
- cfg.StrOpt('default_domain_id', default='default',
- help='This references the domain to use for all '
- 'Identity API v2 requests (which are not aware of '
- 'domains). A domain with this ID will be created '
- 'for you by keystone-manage db_sync in migration '
- '008. The domain referenced by this ID cannot be '
- 'deleted on the v3 API, to prevent accidentally '
- 'breaking the v2 API. There is nothing special about '
- 'this domain, other than the fact that it must '
- 'exist in order to maintain support for your v2 '
- 'clients.'),
- cfg.BoolOpt('domain_specific_drivers_enabled',
- default=False,
- help='A subset (or all) of domains can have their own '
- 'identity driver, each with their own partial '
- 'configuration options, stored in either the '
- 'resource backend or in a file in a domain '
- 'configuration directory (depending on the setting '
- 'of domain_configurations_from_database). Only '
- 'values specific to the domain need to be specified '
- 'in this manner. This feature is disabled by '
- 'default; set to true to enable.'),
- cfg.BoolOpt('domain_configurations_from_database',
- default=False,
- help='Extract the domain specific configuration options '
- 'from the resource backend where they have been '
- 'stored with the domain data. This feature is '
- 'disabled by default (in which case the domain '
- 'specific options will be loaded from files in the '
- 'domain configuration directory); set to true to '
- 'enable.'),
- cfg.StrOpt('domain_config_dir',
- default='/etc/keystone/domains',
- help='Path for Keystone to locate the domain specific '
- 'identity configuration files if '
- 'domain_specific_drivers_enabled is set to true.'),
- cfg.StrOpt('driver',
- default='sql',
- help='Entrypoint for the identity backend driver in the '
- 'keystone.identity namespace. Supplied drivers are '
- 'ldap and sql.'),
- cfg.BoolOpt('caching', default=True,
- help='Toggle for identity caching. This has no '
- 'effect unless global caching is enabled.'),
- cfg.IntOpt('cache_time', default=600,
- help='Time to cache identity data (in seconds). This has '
- 'no effect unless global and identity caching are '
- 'enabled.'),
- cfg.IntOpt('max_password_length', default=4096,
- max=passlib.utils.MAX_PASSWORD_SIZE,
- help='Maximum supported length for user passwords; '
- 'decrease to improve performance.'),
- cfg.IntOpt('list_limit',
- help='Maximum number of entities that will be returned in '
- 'an identity collection.'),
- ],
- 'identity_mapping': [
- cfg.StrOpt('driver',
- default='sql',
- help='Entrypoint for the identity mapping backend driver '
- 'in the keystone.identity.id_mapping namespace.'),
- cfg.StrOpt('generator',
- default='sha256',
- help='Entrypoint for the public ID generator for user and '
- 'group entities in the keystone.identity.id_generator '
- 'namespace. The Keystone identity mapper only '
- 'supports generators that produce no more than 64 '
- 'characters.'),
- cfg.BoolOpt('backward_compatible_ids',
- default=True,
- help='The format of user and group IDs changed '
- 'in Juno for backends that do not generate UUIDs '
- '(e.g. LDAP), with keystone providing a hash mapping '
- 'to the underlying attribute in LDAP. By default '
- 'this mapping is disabled, which ensures that '
- 'existing IDs will not change. Even when the '
- 'mapping is enabled by using domain specific '
- 'drivers, any users and groups from the default '
- 'domain being handled by LDAP will still not be '
- 'mapped to ensure their IDs remain backward '
- 'compatible. Setting this value to False will '
- 'enable the mapping for even the default LDAP '
- 'driver. It is only safe to do this if you do not '
- 'already have assignments for users and '
- 'groups from the default LDAP domain, and it is '
- 'acceptable for Keystone to provide different '
- 'IDs to clients than it did previously. Typically '
- 'this means that the only time you can set this '
- 'value to False is when configuring a fresh '
- 'installation.'),
- ],
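- # A hedged example for backward_compatible_ids: only on a fresh
- # LDAP-backed installation, with no existing assignments, is it safe
- # to enable the mapping for the default domain as well:
- #
- #   [identity_mapping]
- #   backward_compatible_ids = false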
- 'shadow_users': [
- cfg.StrOpt('driver',
- default='sql',
- help='Entrypoint for the shadow users backend driver '
- 'in the keystone.identity.shadow_users namespace.'),
- ],
- 'trust': [
- cfg.BoolOpt('enabled', default=True,
- help='Delegation and impersonation features can be '
- 'optionally disabled.'),
- cfg.BoolOpt('allow_redelegation', default=False,
- help='Enable redelegation feature.'),
- cfg.IntOpt('max_redelegation_count', default=3,
- help='Maximum depth of trust redelegation.'),
- cfg.StrOpt('driver',
- default='sql',
- help='Entrypoint for the trust backend driver in the '
- 'keystone.trust namespace.')],
- 'os_inherit': [
- cfg.BoolOpt('enabled', default=True,
- deprecated_for_removal=True,
- deprecated_reason=_DEPRECATE_INHERIT_MSG,
- help='Role-assignment inheritance to projects from '
- 'owning domain or from projects higher in the '
- 'hierarchy can be optionally disabled. In the '
- 'future, this option will be removed and the '
- 'hierarchy will always be enabled.'),
- ],
- 'fernet_tokens': [
- cfg.StrOpt('key_repository',
- default='/etc/keystone/fernet-keys/',
- help='Directory containing Fernet token keys.'),
- cfg.IntOpt('max_active_keys',
- default=3,
- help='This controls how many keys are held in rotation by '
- 'keystone-manage fernet_rotate before they are '
- 'discarded. The default value of 3 means that '
- 'keystone will maintain one staged key, one primary '
- 'key, and one secondary key. Increasing this value '
- 'means that additional secondary keys will be kept in '
- 'the rotation.'),
- ],
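- # Illustrative [fernet_tokens] stanza (the defaults shown above): with
- # max_active_keys = 3, `keystone-manage fernet_rotate` keeps one staged,
- # one primary and one secondary key in the repository:
- #
- #   [fernet_tokens]
- #   key_repository = /etc/keystone/fernet-keys/
- #   max_active_keys = 3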
- 'token': [
- cfg.ListOpt('bind', default=[],
- help='External auth mechanisms that should add bind '
- 'information to the token, e.g., kerberos, x509.'),
- cfg.StrOpt('enforce_token_bind', default='permissive',
- help='Enforcement policy on tokens presented to Keystone '
- 'with bind information. One of disabled, permissive, '
- 'strict, required or a specifically required bind '
- 'mode, e.g., kerberos or x509, to require binding '
- 'to that authentication method.'),
- cfg.IntOpt('expiration', default=3600,
- help='Amount of time a token should remain valid '
- '(in seconds).'),
- cfg.StrOpt('provider',
- default='uuid',
- help='Controls the token construction, validation, and '
- 'revocation operations. Entrypoint in the '
- 'keystone.token.provider namespace. Core providers '
- 'are [fernet|pkiz|pki|uuid].'),
- cfg.StrOpt('driver',
- default='sql',
- help='Entrypoint for the token persistence backend driver '
- 'in the keystone.token.persistence namespace. '
- 'Supplied drivers are kvs, memcache, memcache_pool, '
- 'and sql.'),
- cfg.BoolOpt('caching', default=True,
- help='Toggle for token system caching. This has no '
- 'effect unless global caching is enabled.'),
- cfg.IntOpt('cache_time',
- help='Time to cache tokens (in seconds). This has no '
- 'effect unless global and token caching are '
- 'enabled.'),
- cfg.BoolOpt('revoke_by_id', default=True,
- help='Revoke token by token identifier. Setting '
- 'revoke_by_id to true enables various forms of '
- 'enumerating tokens, e.g. `list tokens for user`. '
- 'These enumerations are processed to determine the '
- 'list of tokens to revoke. Only disable if you are '
- 'switching to using the Revoke extension with a '
- 'backend other than KVS, which stores events in memory.'),
- cfg.BoolOpt('allow_rescope_scoped_token', default=True,
- help='Allow rescoping of scoped token. Setting '
- 'allow_rescoped_scoped_token to false prevents a user '
- 'from exchanging a scoped token for any other token.'),
- cfg.StrOpt('hash_algorithm', default='md5',
- deprecated_for_removal=True,
- deprecated_reason=_DEPRECATE_PKI_MSG,
- help='The hash algorithm to use for PKI tokens. This can '
- 'be set to any algorithm that hashlib supports. '
- 'WARNING: Before changing this value, the auth_token '
- 'middleware must be configured with the '
- 'hash_algorithms, otherwise token revocation will '
- 'not be processed correctly.'),
- cfg.BoolOpt('infer_roles', default=True,
- help='Add roles to token that are not explicitly added, '
- 'but that are linked implicitly to other roles.'),
- ],
- 'revoke': [
- cfg.StrOpt('driver',
- default='sql',
- help='Entrypoint for an implementation of the backend for '
- 'persisting revocation events in the keystone.revoke '
- 'namespace. Supplied drivers are kvs and sql.'),
- cfg.IntOpt('expiration_buffer', default=1800,
- help='This value (calculated in seconds) is added to token '
- 'expiration before a revocation event may be removed '
- 'from the backend.'),
- cfg.BoolOpt('caching', default=True,
- help='Toggle for revocation event caching. This has no '
- 'effect unless global caching is enabled.'),
- cfg.IntOpt('cache_time', default=3600,
- help='Time to cache the revocation list and the revocation '
- 'events (in seconds). This has no effect unless '
- 'global and token caching are enabled.',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'revocation_cache_time', group='token')]),
- ],
- 'ssl': [
- cfg.StrOpt('ca_key',
- default='/etc/keystone/ssl/private/cakey.pem',
- help='Path of the CA key file for SSL.'),
- cfg.IntOpt('key_size', default=1024, min=1024,
- help='SSL key length (in bits) (auto generated '
- 'certificate).'),
- cfg.IntOpt('valid_days', default=3650,
- help='Days the certificate is valid for once signed '
- '(auto generated certificate).'),
- cfg.StrOpt('cert_subject',
- default='/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost',
- help='SSL certificate subject (auto generated '
- 'certificate).'),
- ],
- 'signing': [
- cfg.StrOpt('certfile',
- default=_CERTFILE,
- deprecated_for_removal=True,
- deprecated_reason=_DEPRECATE_PKI_MSG,
- help='Path of the certfile for token signing. For '
- 'non-production environments, you may be interested '
- 'in using `keystone-manage pki_setup` to generate '
- 'self-signed certificates.'),
- cfg.StrOpt('keyfile',
- default=_KEYFILE,
- deprecated_for_removal=True,
- deprecated_reason=_DEPRECATE_PKI_MSG,
- help='Path of the keyfile for token signing.'),
- cfg.StrOpt('ca_certs',
- deprecated_for_removal=True,
- deprecated_reason=_DEPRECATE_PKI_MSG,
- default='/etc/keystone/ssl/certs/ca.pem',
- help='Path of the CA for token signing.'),
- cfg.StrOpt('ca_key',
- default='/etc/keystone/ssl/private/cakey.pem',
- deprecated_for_removal=True,
- deprecated_reason=_DEPRECATE_PKI_MSG,
- help='Path of the CA key for token signing.'),
- cfg.IntOpt('key_size', default=2048, min=1024,
- deprecated_for_removal=True,
- deprecated_reason=_DEPRECATE_PKI_MSG,
- help='Key size (in bits) for token signing cert '
- '(auto generated certificate).'),
- cfg.IntOpt('valid_days', default=3650,
- deprecated_for_removal=True,
- deprecated_reason=_DEPRECATE_PKI_MSG,
- help='Days the token signing cert is valid for '
- '(auto generated certificate).'),
- cfg.StrOpt('cert_subject',
- deprecated_for_removal=True,
- deprecated_reason=_DEPRECATE_PKI_MSG,
- default=('/C=US/ST=Unset/L=Unset/O=Unset/'
- 'CN=www.example.com'),
- help='Certificate subject (auto generated certificate) for '
- 'token signing.'),
- ],
- 'assignment': [
- cfg.StrOpt('driver',
- help='Entrypoint for the assignment backend driver in the '
- 'keystone.assignment namespace. Only an SQL driver is '
- 'supplied. If an assignment driver is not '
- 'specified, the identity driver will choose the '
- 'assignment driver (driver selection based on '
- '`[identity]/driver` option is deprecated and will be '
- 'removed in the "O" release).'),
- cfg.ListOpt('prohibited_implied_role', default=['admin'],
- help='A list of role names which are prohibited from '
- 'being an implied role.'),
- ],
- 'resource': [
- cfg.StrOpt('driver',
- help='Entrypoint for the resource backend driver in the '
- 'keystone.resource namespace. Only an SQL driver is '
- 'supplied. If a resource driver is not specified, '
- 'the assignment driver will choose the resource '
- 'driver.'),
- cfg.BoolOpt('caching', default=True,
- deprecated_opts=[cfg.DeprecatedOpt('caching',
- group='assignment')],
- help='Toggle for resource caching. This has no effect '
- 'unless global caching is enabled.'),
- cfg.IntOpt('cache_time',
- deprecated_opts=[cfg.DeprecatedOpt('cache_time',
- group='assignment')],
- help='TTL (in seconds) to cache resource data. This has '
- 'no effect unless global caching is enabled.'),
- cfg.IntOpt('list_limit',
- deprecated_opts=[cfg.DeprecatedOpt('list_limit',
- group='assignment')],
- help='Maximum number of entities that will be returned '
- 'in a resource collection.'),
- cfg.StrOpt('admin_project_domain_name',
- help='Name of the domain that owns the '
- '`admin_project_name`. Defaults to None.'),
- cfg.StrOpt('admin_project_name',
- help='Special project for performing administrative '
- 'operations on remote services. Tokens scoped to '
- 'this project will contain the key/value '
- '`is_admin_project=true`. Defaults to None.'),
- cfg.StrOpt('project_name_url_safe',
- choices=['off', 'new', 'strict'], default='off',
- help='Whether the names of projects are restricted from '
- 'containing url reserved characters. If set to new, '
- 'attempts to create or update a project with a url '
- 'unsafe name will return an error. In addition, if '
- 'set to strict, attempts to scope a token using '
- 'an unsafe project name will return an error.'),
- cfg.StrOpt('domain_name_url_safe',
- choices=['off', 'new', 'strict'], default='off',
- help='Whether the names of domains are restricted from '
- 'containing url reserved characters. If set to new, '
- 'attempts to create or update a domain with a url '
- 'unsafe name will return an error. In addition, if '
- 'set to strict, attempts to scope a token using a '
- 'domain name which is unsafe will return an error.'),
- ],
- 'domain_config': [
- cfg.StrOpt('driver',
- default='sql',
- help='Entrypoint for the domain config backend driver in '
- 'the keystone.resource.domain_config namespace.'),
- cfg.BoolOpt('caching', default=True,
- help='Toggle for domain config caching. This has no '
- 'effect unless global caching is enabled.'),
- cfg.IntOpt('cache_time', default=300,
- help='TTL (in seconds) to cache domain config data. This '
- 'has no effect unless domain config caching is '
- 'enabled.'),
- ],
- 'role': [
- # The role driver has no default for backward compatibility reasons.
- # If role driver is not specified, the assignment driver chooses
- # the backend
- cfg.StrOpt('driver',
- help='Entrypoint for the role backend driver in the '
- 'keystone.role namespace. Supplied drivers are ldap '
- 'and sql.'),
- cfg.BoolOpt('caching', default=True,
- help='Toggle for role caching. This has no effect '
- 'unless global caching is enabled.'),
- cfg.IntOpt('cache_time',
- help='TTL (in seconds) to cache role data. This has '
- 'no effect unless global caching is enabled.'),
- cfg.IntOpt('list_limit',
- help='Maximum number of entities that will be returned '
- 'in a role collection.'),
- ],
- 'credential': [
- cfg.StrOpt('driver',
- default='sql',
- help='Entrypoint for the credential backend driver in the '
- 'keystone.credential namespace.'),
- ],
- 'oauth1': [
- cfg.StrOpt('driver',
- default='sql',
- help='Entrypoint for the OAuth backend driver in the '
- 'keystone.oauth1 namespace.'),
- cfg.IntOpt('request_token_duration', default=28800,
- help='Duration (in seconds) for the OAuth Request Token.'),
- cfg.IntOpt('access_token_duration', default=86400,
- help='Duration (in seconds) for the OAuth Access Token.'),
- ],
- 'federation': [
- cfg.StrOpt('driver',
- default='sql',
- help='Entrypoint for the federation backend driver in the '
- 'keystone.federation namespace.'),
- cfg.StrOpt('assertion_prefix', default='',
- help='Value to be used when filtering assertion parameters '
- 'from the environment.'),
- cfg.StrOpt('remote_id_attribute',
- help='Value to be used to obtain the entity ID of the '
- 'Identity Provider from the environment (e.g. if '
- 'using the mod_shib plugin this value is '
- '`Shib-Identity-Provider`).'),
- cfg.StrOpt('federated_domain_name', default='Federated',
- help='A domain name that is reserved to allow federated '
- 'ephemeral users to have a domain concept. Note that '
- 'an admin will not be able to create a domain with '
- 'this name or update an existing domain to this '
- 'name. You should not change this value '
- 'unless you really have to.'),
- cfg.MultiStrOpt('trusted_dashboard', default=[],
- help='A list of trusted dashboard hosts. Before '
- 'accepting a Single Sign-On request to return a '
- 'token, the origin host must be a member of the '
- 'trusted_dashboard list. This configuration '
- 'option may be repeated for multiple values. '
- 'For example: '
- 'trusted_dashboard=http://acme.com/auth/websso '
- 'trusted_dashboard=http://beta.com/auth/websso'),
- cfg.StrOpt('sso_callback_template', default=_SSO_CALLBACK,
- help='Location of Single Sign-On callback handler, will '
- 'return a token to a trusted dashboard host.'),
- ],
- 'policy': [
- cfg.StrOpt('driver',
- default='sql',
- help='Entrypoint for the policy backend driver in the '
- 'keystone.policy namespace. Supplied drivers are '
- 'rules and sql.'),
- cfg.IntOpt('list_limit',
- help='Maximum number of entities that will be returned '
- 'in a policy collection.'),
- ],
- 'endpoint_filter': [
- cfg.StrOpt('driver',
- default='sql',
- help='Entrypoint for the endpoint filter backend driver in '
- 'the keystone.endpoint_filter namespace.'),
- cfg.BoolOpt('return_all_endpoints_if_no_filter', default=True,
- help='Toggle to return all active endpoints if no filter '
- 'exists.'),
- ],
- 'endpoint_policy': [
- cfg.BoolOpt('enabled',
- default=True,
- deprecated_for_removal=True,
- deprecated_reason=_DEPRECATE_EP_MSG,
- help='Enable endpoint_policy functionality.'),
- cfg.StrOpt('driver',
- default='sql',
- help='Entrypoint for the endpoint policy backend driver in '
- 'the keystone.endpoint_policy namespace.'),
- ],
- 'ldap': [
- cfg.StrOpt('url', default='ldap://localhost',
- help='URL(s) for connecting to the LDAP server. Multiple '
- 'LDAP URLs may be specified as a comma separated '
- 'string. The first URL to successfully bind is used '
- 'for the connection.'),
- cfg.StrOpt('user',
- help='User BindDN to query the LDAP server.'),
- cfg.StrOpt('password', secret=True,
- help='Password for the BindDN to query the LDAP server.'),
- cfg.StrOpt('suffix', default='cn=example,cn=com',
- help='LDAP server suffix.'),
- cfg.BoolOpt('use_dumb_member', default=False,
- help='If true, will add a dummy member to groups. This is '
- 'required if the objectclass for groups requires the '
- '"member" attribute.'),
- cfg.StrOpt('dumb_member', default='cn=dumb,dc=nonexistent',
- help='DN of the "dummy member" to use when '
- '"use_dumb_member" is enabled.'),
- cfg.BoolOpt('allow_subtree_delete', default=False,
- help='Delete subtrees using the subtree delete control. '
- 'Only enable this option if your LDAP server '
- 'supports subtree deletion.'),
- cfg.StrOpt('query_scope', default='one',
- choices=['one', 'sub'],
- help='The LDAP scope for queries, "one" represents '
- 'oneLevel/singleLevel and "sub" represents '
- 'subtree/wholeSubtree options.'),
- cfg.IntOpt('page_size', default=0,
- help='Maximum results per page; a value of zero ("0") '
- 'disables paging.'),
- cfg.StrOpt('alias_dereferencing', default='default',
- choices=['never', 'searching', 'always', 'finding',
- 'default'],
- help='The LDAP dereferencing option for queries. The '
- '"default" option falls back to using default '
- 'dereferencing configured by your ldap.conf.'),
- cfg.IntOpt('debug_level',
- help='Sets the LDAP debugging level for LDAP calls. '
- 'A value of 0 means that debugging is not enabled. '
- 'This value is a bitmask, consult your LDAP '
- 'documentation for possible values.'),
- cfg.BoolOpt('chase_referrals',
- help='Override the system\'s default referral chasing '
- 'behavior for queries.'),
- cfg.StrOpt('user_tree_dn',
- help='Search base for users. '
- 'Defaults to the suffix value.'),
- cfg.StrOpt('user_filter',
- help='LDAP search filter for users.'),
- cfg.StrOpt('user_objectclass', default='inetOrgPerson',
- help='LDAP objectclass for users.'),
- cfg.StrOpt('user_id_attribute', default='cn',
- help='LDAP attribute mapped to user id. '
- 'WARNING: must not be a multivalued attribute.'),
- cfg.StrOpt('user_name_attribute', default='sn',
- help='LDAP attribute mapped to user name.'),
- cfg.StrOpt('user_description_attribute', default='description',
- help='LDAP attribute mapped to user description.'),
- cfg.StrOpt('user_mail_attribute', default='mail',
- help='LDAP attribute mapped to user email.'),
- cfg.StrOpt('user_pass_attribute', default='userPassword',
- help='LDAP attribute mapped to password.'),
- cfg.StrOpt('user_enabled_attribute', default='enabled',
- help='LDAP attribute mapped to user enabled flag.'),
- cfg.BoolOpt('user_enabled_invert', default=False,
- help='Invert the meaning of the boolean enabled values. '
- 'Some LDAP servers use a boolean lock attribute '
- 'where "true" means an account is disabled. Setting '
- '"user_enabled_invert = true" will allow these lock '
- 'attributes to be used. This setting will have no '
- 'effect if "user_enabled_mask" or '
- '"user_enabled_emulation" settings are in use.'),
- cfg.IntOpt('user_enabled_mask', default=0,
- help='Bitmask integer to indicate the bit that the enabled '
- 'value is stored in if the LDAP server represents '
- '"enabled" as a bit on an integer rather than a '
- 'boolean. A value of "0" indicates the mask is not '
- 'used. If this is not set to "0" the typical value '
- 'is "2". This is typically used when '
- '"user_enabled_attribute = userAccountControl".'),
- cfg.StrOpt('user_enabled_default', default='True',
- help='Default value to enable users. This should match an '
- 'appropriate int value if the LDAP server uses '
- 'non-boolean (bitmask) values to indicate if a user '
- 'is enabled or disabled. If this is not set to "True" '
- 'the typical value is "512". This is typically used '
- 'when "user_enabled_attribute = userAccountControl".'),
- cfg.ListOpt('user_attribute_ignore',
- default=['default_project_id'],
- help='List of attributes stripped off the user on '
- 'update.'),
- cfg.StrOpt('user_default_project_id_attribute',
- help='LDAP attribute mapped to default_project_id for '
- 'users.'),
- cfg.BoolOpt('user_allow_create', default=True,
- deprecated_for_removal=True,
- deprecated_reason="Write support for Identity LDAP "
- "backends has been deprecated in the M "
- "release and will be removed in the O "
- "release.",
- help='Allow user creation in LDAP backend.'),
- cfg.BoolOpt('user_allow_update', default=True,
- deprecated_for_removal=True,
- deprecated_reason="Write support for Identity LDAP "
- "backends has been deprecated in the M "
- "release and will be removed in the O "
- "release.",
- help='Allow user updates in LDAP backend.'),
- cfg.BoolOpt('user_allow_delete', default=True,
- deprecated_for_removal=True,
- deprecated_reason="Write support for Identity LDAP "
- "backends has been deprecated in the M "
- "release and will be removed in the O "
- "release.",
- help='Allow user deletion in LDAP backend.'),
- cfg.BoolOpt('user_enabled_emulation', default=False,
- help='If true, Keystone uses an alternative method to '
- 'determine if a user is enabled or not by checking '
- 'if they are a member of the '
- '"user_enabled_emulation_dn" group.'),
- cfg.StrOpt('user_enabled_emulation_dn',
- help='DN of the group entry to hold enabled users when '
- 'using enabled emulation.'),
- cfg.BoolOpt('user_enabled_emulation_use_group_config', default=False,
- help='Use the "group_member_attribute" and '
- '"group_objectclass" settings to determine '
- 'membership in the emulated enabled group.'),
- cfg.ListOpt('user_additional_attribute_mapping',
- default=[],
- help='Additional attribute mappings for users. Attribute '
- 'mapping format is <ldap_attr>:<user_attr>, where '
- 'ldap_attr is the attribute in the LDAP entry and '
- 'user_attr is the Identity API attribute.'),
- cfg.StrOpt('group_tree_dn',
- help='Search base for groups. '
- 'Defaults to the suffix value.'),
- cfg.StrOpt('group_filter',
- help='LDAP search filter for groups.'),
- cfg.StrOpt('group_objectclass', default='groupOfNames',
- help='LDAP objectclass for groups.'),
- cfg.StrOpt('group_id_attribute', default='cn',
- help='LDAP attribute mapped to group id.'),
- cfg.StrOpt('group_name_attribute', default='ou',
- help='LDAP attribute mapped to group name.'),
- cfg.StrOpt('group_member_attribute', default='member',
- help='LDAP attribute mapped to show group membership.'),
- cfg.StrOpt('group_desc_attribute', default='description',
- help='LDAP attribute mapped to group description.'),
- cfg.ListOpt('group_attribute_ignore', default=[],
- help='List of attributes stripped off the group on '
- 'update.'),
- cfg.BoolOpt('group_allow_create', default=True,
- deprecated_for_removal=True,
- deprecated_reason="Write support for Identity LDAP "
- "backends has been deprecated in the M "
- "release and will be removed in the O "
- "release.",
- help='Allow group creation in LDAP backend.'),
- cfg.BoolOpt('group_allow_update', default=True,
- deprecated_for_removal=True,
- deprecated_reason="Write support for Identity LDAP "
- "backends has been deprecated in the M "
- "release and will be removed in the O "
- "release.",
- help='Allow group update in LDAP backend.'),
- cfg.BoolOpt('group_allow_delete', default=True,
- deprecated_for_removal=True,
- deprecated_reason="Write support for Identity LDAP "
- "backends has been deprecated in the M "
- "release and will be removed in the O "
- "release.",
- help='Allow group deletion in LDAP backend.'),
- cfg.ListOpt('group_additional_attribute_mapping',
- default=[],
- help='Additional attribute mappings for groups. Attribute '
- 'mapping format is <ldap_attr>:<user_attr>, where '
- 'ldap_attr is the attribute in the LDAP entry and '
- 'user_attr is the Identity API attribute.'),
-
- cfg.StrOpt('tls_cacertfile',
- help='CA certificate file path for communicating with '
- 'LDAP servers.'),
- cfg.StrOpt('tls_cacertdir',
- help='CA certificate directory path for communicating with '
- 'LDAP servers.'),
- cfg.BoolOpt('use_tls', default=False,
- help='Enable TLS for communicating with LDAP servers.'),
- cfg.StrOpt('tls_req_cert', default='demand',
- choices=['demand', 'never', 'allow'],
- help='Specifies what checks to perform on client '
- 'certificates in an incoming TLS session.'),
- cfg.BoolOpt('use_pool', default=True,
- help='Enable LDAP connection pooling.'),
- cfg.IntOpt('pool_size', default=10,
- help='Connection pool size.'),
- cfg.IntOpt('pool_retry_max', default=3,
- help='Maximum count of reconnect trials.'),
- cfg.FloatOpt('pool_retry_delay', default=0.1,
- help='Time span in seconds to wait between two '
- 'reconnect trials.'),
- cfg.IntOpt('pool_connection_timeout', default=-1,
- help='Connector timeout in seconds. Value -1 indicates '
- 'indefinite wait for response.'),
- cfg.IntOpt('pool_connection_lifetime', default=600,
- help='Connection lifetime in seconds.'),
- cfg.BoolOpt('use_auth_pool', default=True,
- help='Enable LDAP connection pooling for end user '
- 'authentication. If use_pool is disabled, then this '
- 'setting is meaningless and is not used at all.'),
- cfg.IntOpt('auth_pool_size', default=100,
- help='End user auth connection pool size.'),
- cfg.IntOpt('auth_pool_connection_lifetime', default=60,
- help='End user auth connection lifetime in seconds.'),
- cfg.BoolOpt('group_members_are_ids', default=False,
- help='If the members of the group objectclass are user '
- 'IDs rather than DNs, set this to true. This is the '
- 'case when using posixGroup as the group '
- 'objectclass and OpenDirectory.'),
- ],
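- # A minimal illustrative [ldap] stanza; the host, DNs and certificate
- # path are made-up values, only the option names come from above:
- #
- #   [ldap]
- #   url = ldap://ldap.example.com
- #   user = cn=admin,dc=example,dc=com
- #   password = secret
- #   suffix = dc=example,dc=com
- #   use_tls = true
- #   tls_cacertfile = /etc/ssl/certs/ldap-ca.pem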
- 'auth': [
- cfg.ListOpt('methods', default=_DEFAULT_AUTH_METHODS,
- help='Allowed authentication methods.'),
- cfg.StrOpt('password', # nosec : This is the name of the plugin, not
- # a password that needs to be protected.
- help='Entrypoint for the password auth plugin module in '
- 'the keystone.auth.password namespace.'),
- cfg.StrOpt('token',
- help='Entrypoint for the token auth plugin module in the '
- 'keystone.auth.token namespace.'),
- # deals with REMOTE_USER authentication
- cfg.StrOpt('external',
- help='Entrypoint for the external (REMOTE_USER) auth '
- 'plugin module in the keystone.auth.external '
- 'namespace. Supplied drivers are DefaultDomain and '
- 'Domain. The default driver is DefaultDomain.'),
- cfg.StrOpt('oauth1',
- help='Entrypoint for the oAuth1.0 auth plugin module in '
- 'the keystone.auth.oauth1 namespace.'),
- ],
- 'tokenless_auth': [
- cfg.MultiStrOpt('trusted_issuer', default=[],
- help='The list of trusted issuers to further filter '
- 'the certificates that are allowed to '
- 'participate in the X.509 tokenless '
- 'authorization. If the option is absent then '
- 'no certificates will be allowed. '
- 'The naming format for the attributes of a '
- 'Distinguished Name(DN) must be separated by a '
- 'comma and contain no spaces. This configuration '
- 'option may be repeated for multiple values. '
- 'For example: '
- 'trusted_issuer=CN=john,OU=keystone,O=openstack '
- 'trusted_issuer=CN=mary,OU=eng,O=abc'),
- cfg.StrOpt('protocol', default='x509',
- help='The protocol name for the X.509 tokenless '
- 'authorization. Along with the issuer_attribute '
- 'option below, it is used to look up the '
- 'corresponding mapping.'),
- cfg.StrOpt('issuer_attribute', default='SSL_CLIENT_I_DN',
- help='The issuer attribute that serves as an IdP ID '
- 'for the X.509 tokenless authorization, used along '
- 'with the protocol to look up the corresponding '
- 'mapping. It is the variable in the WSGI '
- 'environment that references the issuer of the '
- 'client certificate.'),
- ],
- 'paste_deploy': [
- cfg.StrOpt('config_file', default='keystone-paste.ini',
- help='Name of the paste configuration file that defines '
- 'the available pipelines.'),
- ],
- 'memcache': [
- cfg.ListOpt('servers', default=['localhost:11211'],
- help='Memcache servers in the format of "host:port".'),
- cfg.IntOpt('dead_retry',
- default=5 * 60,
- help='Number of seconds a memcached server is considered dead'
- ' before it is tried again. This is used by the key '
- 'value store system (e.g. token '
- 'pooled memcached persistence backend).'),
- cfg.IntOpt('socket_timeout',
- default=3,
- help='Timeout in seconds for every call to a server. This '
- 'is used by the key value store system (e.g. token '
- 'pooled memcached persistence backend).'),
- cfg.IntOpt('pool_maxsize',
- default=10,
- help='Max total number of open connections to every'
- ' memcached server. This is used by the key value '
- 'store system (e.g. token pooled memcached '
- 'persistence backend).'),
- cfg.IntOpt('pool_unused_timeout',
- default=60,
- help='Number of seconds a connection to memcached is held'
- ' unused in the pool before it is closed. This is used'
- ' by the key value store system (e.g. token pooled '
- 'memcached persistence backend).'),
- cfg.IntOpt('pool_connection_get_timeout',
- default=10,
- help='Number of seconds that an operation will wait to get '
- 'a memcache client connection. This is used by the '
- 'key value store system (e.g. token pooled memcached '
- 'persistence backend).'),
- ],
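- # Illustrative [memcache] stanza listing several "host:port" servers
- # (hostnames are made up; the pool settings keep the defaults above):
- #
- #   [memcache]
- #   servers = memcache1:11211,memcache2:11211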
- 'catalog': [
- cfg.StrOpt('template_file',
- default='default_catalog.templates',
- help='Catalog template file name for use with the '
- 'template catalog backend.'),
- cfg.StrOpt('driver',
- default='sql',
- help='Entrypoint for the catalog backend driver in the '
- 'keystone.catalog namespace. Supplied drivers are '
- 'kvs, sql, templated, and endpoint_filter.sql.'),
- cfg.BoolOpt('caching', default=True,
- help='Toggle for catalog caching. This has no '
- 'effect unless global caching is enabled.'),
- cfg.IntOpt('cache_time',
- help='Time to cache catalog data (in seconds). This has no '
- 'effect unless global and catalog caching are '
- 'enabled.'),
- cfg.IntOpt('list_limit',
- help='Maximum number of entities that will be returned '
- 'in a catalog collection.'),
- ],
- 'kvs': [
- cfg.ListOpt('backends', default=[],
- help='Extra dogpile.cache backend modules to register '
- 'with the dogpile.cache library.'),
- cfg.StrOpt('config_prefix', default='keystone.kvs',
- help='Prefix for building the configuration dictionary '
- 'for the KVS region. This should not need to be '
- 'changed unless there is another dogpile.cache '
- 'region with the same configuration name.'),
- cfg.BoolOpt('enable_key_mangler', default=True,
- help='Toggle to disable using a key-mangling function '
- 'to ensure fixed-length keys. This is configurable '
- 'for debugging purposes; it is highly recommended '
- 'to always leave this set to true.'),
- cfg.IntOpt('default_lock_timeout', default=5,
- help='Default lock timeout (in seconds) for distributed '
- 'locking.'),
- ],
- 'saml': [
- cfg.IntOpt('assertion_expiration_time', default=3600,
- help='Default TTL, in seconds, for any generated SAML '
- 'assertion created by Keystone.'),
- cfg.StrOpt('xmlsec1_binary',
- default='xmlsec1',
- help='Binary to be called for XML signing. Install the '
- 'appropriate package, specify absolute path or adjust '
- 'your PATH environment variable if the binary cannot '
- 'be found.'),
- cfg.StrOpt('certfile',
- default=_CERTFILE,
- help='Path of the certfile for SAML signing. For '
- 'non-production environments, you may be interested '
- 'in using `keystone-manage pki_setup` to generate '
- 'self-signed certificates. Note, the path cannot '
- 'contain a comma.'),
- cfg.StrOpt('keyfile',
- default=_KEYFILE,
- help='Path of the keyfile for SAML signing. Note, the path '
- 'cannot contain a comma.'),
- cfg.StrOpt('idp_entity_id',
- help='Entity ID value for unique Identity Provider '
- 'identification. Usually FQDN is set with a suffix. '
- 'A value is required to generate IDP Metadata. '
- 'For example: https://keystone.example.com/v3/'
- 'OS-FEDERATION/saml2/idp'),
- cfg.StrOpt('idp_sso_endpoint',
- help='Identity Provider Single-Sign-On service value, '
- 'required in the Identity Provider\'s metadata. '
- 'A value is required to generate IDP Metadata. '
- 'For example: https://keystone.example.com/v3/'
- 'OS-FEDERATION/saml2/sso'),
- cfg.StrOpt('idp_lang', default='en',
- help='Language used by the organization.'),
- cfg.StrOpt('idp_organization_name',
- help='Organization name the installation belongs to.'),
- cfg.StrOpt('idp_organization_display_name',
- help='Organization name to be displayed.'),
- cfg.StrOpt('idp_organization_url',
- help='URL of the organization.'),
- cfg.StrOpt('idp_contact_company',
- help='Company of contact person.'),
- cfg.StrOpt('idp_contact_name',
- help='Given name of contact person.'),
- cfg.StrOpt('idp_contact_surname',
- help='Surname of contact person.'),
- cfg.StrOpt('idp_contact_email',
- help='Email address of contact person.'),
- cfg.StrOpt('idp_contact_telephone',
- help='Telephone number of contact person.'),
- cfg.StrOpt('idp_contact_type', default='other',
- choices=['technical', 'support', 'administrative',
- 'billing', 'other'],
- help='The contact type describing the main point of '
- 'contact for the identity provider.'),
- cfg.StrOpt('idp_metadata_path',
- default='/etc/keystone/saml2_idp_metadata.xml',
- help='Path to the Identity Provider Metadata file. '
- 'This file should be generated with the '
- 'keystone-manage saml_idp_metadata command.'),
- cfg.StrOpt('relay_state_prefix',
- default='ss:mem:',
- help='The prefix to use for the RelayState SAML '
- 'attribute, used when generating ECP wrapped '
- 'assertions.'),
- ],
- 'eventlet_server': [
- cfg.IntOpt('public_workers',
- deprecated_name='public_workers',
- deprecated_group='DEFAULT',
- deprecated_for_removal=True,
- help='The number of worker processes to serve the public '
- 'eventlet application. Defaults to number of CPUs '
- '(minimum of 2).'),
- cfg.IntOpt('admin_workers',
- deprecated_name='admin_workers',
- deprecated_group='DEFAULT',
- deprecated_for_removal=True,
- help='The number of worker processes to serve the admin '
- 'eventlet application. Defaults to number of CPUs '
- '(minimum of 2).'),
- cfg.StrOpt('public_bind_host',
- default='0.0.0.0', # nosec : Bind to all interfaces by
- # default for backwards compatibility.
- deprecated_opts=[cfg.DeprecatedOpt('bind_host',
- group='DEFAULT'),
- cfg.DeprecatedOpt('public_bind_host',
- group='DEFAULT'), ],
- deprecated_for_removal=True,
- help='The IP address of the network interface for the '
- 'public service to listen on.'),
- cfg.PortOpt('public_port', default=5000,
- deprecated_name='public_port',
- deprecated_group='DEFAULT',
- deprecated_for_removal=True,
- help='The port number which the public service listens '
- 'on.'),
- cfg.StrOpt('admin_bind_host',
- default='0.0.0.0', # nosec : Bind to all interfaces by
- # default for backwards compatibility.
- deprecated_opts=[cfg.DeprecatedOpt('bind_host',
- group='DEFAULT'),
- cfg.DeprecatedOpt('admin_bind_host',
- group='DEFAULT')],
- deprecated_for_removal=True,
- help='The IP address of the network interface for the '
- 'admin service to listen on.'),
- cfg.PortOpt('admin_port', default=35357,
- deprecated_name='admin_port',
- deprecated_group='DEFAULT',
- deprecated_for_removal=True,
- help='The port number which the admin service listens '
- 'on.'),
- cfg.BoolOpt('wsgi_keep_alive', default=True,
- help='If set to false, disables keepalives on the server; '
- 'all connections will be closed after serving one '
- 'request.'),
- cfg.IntOpt('client_socket_timeout', default=900,
- help='Timeout for socket operations on a client '
- 'connection. If an incoming connection is idle for '
- 'this number of seconds it will be closed. A value '
- 'of "0" means wait forever.'),
- cfg.BoolOpt('tcp_keepalive', default=False,
- deprecated_name='tcp_keepalive',
- deprecated_group='DEFAULT',
- deprecated_for_removal=True,
- help='Set this to true if you want to enable '
- 'TCP_KEEPALIVE on server sockets, i.e. sockets used '
- 'by the Keystone wsgi server for client '
- 'connections.'),
- cfg.IntOpt('tcp_keepidle',
- default=600,
- deprecated_name='tcp_keepidle',
- deprecated_group='DEFAULT',
- deprecated_for_removal=True,
- help='Sets the value of TCP_KEEPIDLE in seconds for each '
- 'server socket. Only applies if tcp_keepalive is '
- 'true. Ignored if system does not support it.'),
- ],
- 'eventlet_server_ssl': [
- cfg.BoolOpt('enable', default=False, deprecated_name='enable',
- deprecated_group='ssl',
- deprecated_for_removal=True,
- help='Toggle for SSL support on the Keystone '
- 'eventlet servers.'),
- cfg.StrOpt('certfile',
- default='/etc/keystone/ssl/certs/keystone.pem',
- deprecated_name='certfile', deprecated_group='ssl',
- deprecated_for_removal=True,
- help='Path of the certfile for SSL. For non-production '
- 'environments, you may be interested in using '
- '`keystone-manage ssl_setup` to generate self-signed '
- 'certificates.'),
- cfg.StrOpt('keyfile',
- default='/etc/keystone/ssl/private/keystonekey.pem',
- deprecated_name='keyfile', deprecated_group='ssl',
- deprecated_for_removal=True,
- help='Path of the keyfile for SSL.'),
- cfg.StrOpt('ca_certs',
- default='/etc/keystone/ssl/certs/ca.pem',
- deprecated_name='ca_certs', deprecated_group='ssl',
- deprecated_for_removal=True,
- help='Path of the CA cert file for SSL.'),
- cfg.BoolOpt('cert_required', default=False,
- deprecated_name='cert_required', deprecated_group='ssl',
- deprecated_for_removal=True,
- help='Require client certificate.'),
- ],
-}
-
-
-CONF = cfg.CONF
-oslo_messaging.set_transport_defaults(control_exchange='keystone')
-
-
-def _register_auth_plugin_opt(conf, option):
- conf.register_opt(option, group='auth')
-
-
-def setup_authentication(conf=None):
- # register any non-default auth methods here (used by extensions, etc)
- if conf is None:
- conf = CONF
- for method_name in conf.auth.methods:
- if method_name not in _DEFAULT_AUTH_METHODS:
- option = cfg.StrOpt(method_name)
- _register_auth_plugin_opt(conf, option)
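-# Illustrative sketch: a non-default method listed under [auth]/methods,
-# say 'kerberos', is absent from _DEFAULT_AUTH_METHODS, so
-# setup_authentication() registers a StrOpt for it and CONF.auth.kerberos
-# then names the plugin to load (the entrypoint below is hypothetical):
-#
-#   [auth]
-#   methods = external,password,token,kerberos
-#   kerberos = keystone.auth.plugins.mapped.Mapped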
-
-
-def set_default_for_default_log_levels():
- """Set the default for the default_log_levels option for keystone.
-
- Keystone uses some logging packages that other OpenStack services do not
- use. This sets the default level in default_log_levels for those
- packages.
-
- This function needs to be called before CONF().
-
- """
- extra_log_level_defaults = [
- 'dogpile=INFO',
- 'routes=INFO',
- ]
-
- log.register_options(CONF)
- log.set_defaults(default_log_levels=log.get_default_log_levels() +
- extra_log_level_defaults)
-
-
-def setup_logging():
- """Sets up logging for the keystone package."""
- log.setup(CONF, 'keystone')
- logging.captureWarnings(True)
-
-
-def find_paste_config():
- """Find Keystone's paste.deploy configuration file.
-
- Keystone's paste.deploy configuration file is specified in the
- ``[paste_deploy]`` section of the main Keystone configuration file,
- ``keystone.conf``.
-
- For example::
-
- [paste_deploy]
- config_file = keystone-paste.ini
-
- :returns: The selected configuration filename
- :raises: exception.ConfigFileNotFound
-
- """
- if CONF.paste_deploy.config_file:
- paste_config = CONF.paste_deploy.config_file
- paste_config_value = paste_config
- if not os.path.isabs(paste_config):
- paste_config = CONF.find_file(paste_config)
- elif CONF.config_file:
- paste_config = CONF.config_file[0]
- paste_config_value = paste_config
- else:
- # this provides backwards compatibility for keystone.conf files that
- # still have the entire paste configuration included, rather than just
- # a [paste_deploy] configuration section referring to an external file
- paste_config = CONF.find_file('keystone.conf')
- paste_config_value = 'keystone.conf'
- if not paste_config or not os.path.exists(paste_config):
- raise exception.ConfigFileNotFound(config_file=paste_config_value)
- return paste_config
-
-
-def configure(conf=None):
- if conf is None:
- conf = CONF
-
- conf.register_cli_opt(
- cfg.BoolOpt('standard-threads', default=False,
- help='Do not monkey-patch threading system modules.'))
- conf.register_cli_opt(
- cfg.StrOpt('pydev-debug-host',
- help='Host to connect to for remote debugger.'))
- conf.register_cli_opt(
- cfg.PortOpt('pydev-debug-port',
- help='Port to connect to for remote debugger.'))
-
- for section in FILE_OPTIONS:
- for option in FILE_OPTIONS[section]:
- if section:
- conf.register_opt(option, group=section)
- else:
- conf.register_opt(option)
-
- # register any non-default auth methods here (used by extensions, etc)
- setup_authentication(conf)
- # add oslo.cache related config options
- cache.configure(conf)
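-# A minimal sketch (hypothetical caller, not part of this module) of how
-# the options registered by configure() are read once CONF has parsed its
-# sources:
-#
-#   configure()
-#   CONF(project='keystone')                  # parse config files / CLI
-#   identity_driver = CONF.identity.driver    # 'sql' by default
-#   token_ttl = CONF.token.expiration         # 3600 by default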
-
-
-def list_opts():
- """Return a list of oslo_config options available in Keystone.
-
- The returned list includes all oslo_config options which are registered in
- the "FILE_OPTIONS" dict in keystone.common.config. This list will not include
- the options from the oslo-incubator library or any options registered
- dynamically at run time.
-
- Each object in the list is a two element tuple. The first element of
- each tuple is the name of the group under which the list of options in the
- second element will be registered. A group name of None corresponds to the
- [DEFAULT] group in config files.
-
- This function is also discoverable via the 'oslo_config.opts' entry point
- under the 'keystone.config.opts' namespace.
-
- The purpose of this is to allow tools like the Oslo sample config file
- generator to discover the options exposed to users by this library.
-
- :returns: a list of (group_name, opts) tuples
- """
- return list(FILE_OPTIONS.items())
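-# For example, a sample-config tool can walk the returned tuples like this
-# (a sketch; a group of None is the [DEFAULT] group):
-#
-#   for group, opts in list_opts():
-#       print(group or 'DEFAULT', sorted(opt.name for opt in opts))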
-
-
-def set_middleware_defaults():
- """Update default configuration options for oslo.middleware."""
- # CORS Defaults
- # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/
- cfg.set_defaults(cors.CORS_OPTS,
- allow_headers=['X-Auth-Token',
- 'X-Openstack-Request-Id',
- 'X-Subject-Token',
- 'X-Project-Id',
- 'X-Project-Name',
- 'X-Project-Domain-Id',
- 'X-Project-Domain-Name',
- 'X-Domain-Id',
- 'X-Domain-Name'],
- expose_headers=['X-Auth-Token',
- 'X-Openstack-Request-Id',
- 'X-Subject-Token'],
- allow_methods=['GET',
- 'PUT',
- 'POST',
- 'DELETE',
- 'PATCH']
- )
-
-
-def set_config_defaults():
- """Override all configuration default values for keystone."""
- set_default_for_default_log_levels()
- set_middleware_defaults()
diff --git a/keystone-moon/keystone/common/controller.py b/keystone-moon/keystone/common/controller.py
deleted file mode 100644
index 8672525f..00000000
--- a/keystone-moon/keystone/common/controller.py
+++ /dev/null
@@ -1,835 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import functools
-import uuid
-
-from oslo_config import cfg
-from oslo_log import log
-from oslo_log import versionutils
-from oslo_utils import strutils
-import six
-
-from keystone.common import authorization
-from keystone.common import dependency
-from keystone.common import driver_hints
-from keystone.common import utils
-from keystone.common import wsgi
-from keystone import exception
-from keystone.i18n import _, _LW
-from keystone.models import token_model
-
-
-LOG = log.getLogger(__name__)
-CONF = cfg.CONF
-
-
-def v2_deprecated(f):
- @six.wraps(f)
- def wrapper(*args, **kwargs):
- deprecated = versionutils.deprecated(
- what=f.__name__ + ' of the v2 API',
- as_of=versionutils.deprecated.MITAKA,
- in_favor_of='a similar function in the v3 API',
- remove_in=+4)
- return deprecated(f)
- return wrapper()
-
-
-def v2_ec2_deprecated(f):
- @six.wraps(f)
- def wrapper(*args, **kwargs):
- deprecated = versionutils.deprecated(
- what=f.__name__ + ' of the v2 EC2 APIs',
- as_of=versionutils.deprecated.MITAKA,
- in_favor_of=('a similar function in the v3 Credential APIs'),
- remove_in=0)
- return deprecated(f)
- return wrapper()
-
-
-def v2_auth_deprecated(f):
- @six.wraps(f)
- def wrapper(*args, **kwargs):
- deprecated = versionutils.deprecated(
- what=f.__name__ + ' of the v2 Authentication APIs',
- as_of=versionutils.deprecated.MITAKA,
- in_favor_of=('a similar function in the v3 Authentication APIs'),
- remove_in=0)
- return deprecated(f)
- return wrapper()
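-# Usage sketch for the decorators above (the method is hypothetical): the
-# decorated function behaves as before but emits a deprecation warning,
-# pointing at the v3 equivalent, when invoked.
-#
-#   @v2_deprecated
-#   def get_tenant(self, context, tenant_id):
-#       ...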
-
-
-def _build_policy_check_credentials(self, action, context, kwargs):
- kwargs_str = ', '.join(['%s=%s' % (k, kwargs[k]) for k in kwargs])
- kwargs_str = strutils.mask_password(kwargs_str)
-
- LOG.debug('RBAC: Authorizing %(action)s(%(kwargs)s)', {
- 'action': action,
- 'kwargs': kwargs_str})
-
- # see if auth context has already been created. If so use it.
- if ('environment' in context and
- authorization.AUTH_CONTEXT_ENV in context['environment']):
- LOG.debug('RBAC: using auth context from the request environment')
- return context['environment'].get(authorization.AUTH_CONTEXT_ENV)
-
- # There is no current auth context, build it from the incoming token.
- # TODO(morganfainberg): Collapse this logic with AuthContextMiddleware
- # in a sane manner as this just mirrors the logic in AuthContextMiddleware
- try:
- LOG.debug('RBAC: building auth context from the incoming auth token')
- token_ref = token_model.KeystoneToken(
- token_id=context['token_id'],
- token_data=self.token_provider_api.validate_token(
- context['token_id']))
- # NOTE(jamielennox): whilst this maybe shouldn't be within this
- # function it would otherwise need to reload the token_ref from
- # backing store.
- wsgi.validate_token_bind(context, token_ref)
- except exception.TokenNotFound:
- LOG.warning(_LW('RBAC: Invalid token'))
- raise exception.Unauthorized()
-
- auth_context = authorization.token_to_auth_context(token_ref)
-
- return auth_context
-
-
-def protected(callback=None):
- """Wraps API calls with role based access controls (RBAC).
-
- This handles both the protection of the API parameters as well as any
- target entities for single-entity API calls.
-
- More complex API calls (for example that deal with several different
- entities) should pass in a callback function, that will be subsequently
- called to check protection for these multiple entities. This callback
- function should gather the appropriate entities needed and then call
- check_protection() in the V3Controller class.
-
- """
- def wrapper(f):
- @functools.wraps(f)
- def inner(self, context, *args, **kwargs):
- if 'is_admin' in context and context['is_admin']:
- LOG.warning(_LW('RBAC: Bypassing authorization'))
- elif callback is not None:
- prep_info = {'f_name': f.__name__,
- 'input_attr': kwargs}
- callback(self, context, prep_info, *args, **kwargs)
- else:
- action = 'identity:%s' % f.__name__
- creds = _build_policy_check_credentials(self, action,
- context, kwargs)
-
- policy_dict = {}
-
- # Check to see if we need to include the target entity in our
- # policy checks. We deduce this by seeing if the class has
- # specified a get_member_from_driver() method and that kwargs
- # contains the appropriate entity id.
- if (hasattr(self, 'get_member_from_driver') and
- self.get_member_from_driver is not None):
- key = '%s_id' % self.member_name
- if key in kwargs:
- ref = self.get_member_from_driver(kwargs[key])
- policy_dict['target'] = {self.member_name: ref}
-
- # TODO(henry-nash): Move this entire code to a member
- # method inside v3 Auth
- if context.get('subject_token_id') is not None:
- token_ref = token_model.KeystoneToken(
- token_id=context['subject_token_id'],
- token_data=self.token_provider_api.validate_token(
- context['subject_token_id']))
- policy_dict.setdefault('target', {})
- policy_dict['target'].setdefault(self.member_name, {})
- policy_dict['target'][self.member_name]['user_id'] = (
- token_ref.user_id)
- try:
- user_domain_id = token_ref.user_domain_id
- except exception.UnexpectedError:
- user_domain_id = None
- if user_domain_id:
- policy_dict['target'][self.member_name].setdefault(
- 'user', {})
- policy_dict['target'][self.member_name][
- 'user'].setdefault('domain', {})
- policy_dict['target'][self.member_name]['user'][
- 'domain']['id'] = (
- user_domain_id)
-
- # Add in the kwargs, which means that any entity provided as a
- # parameter for calls like create and update will be included.
- policy_dict.update(kwargs)
- self.policy_api.enforce(creds,
- action,
- utils.flatten_dict(policy_dict))
- LOG.debug('RBAC: Authorization granted')
- return f(self, context, *args, **kwargs)
- return inner
- return wrapper
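-# Usage sketch (method and callback names are illustrative): a
-# single-entity call needs no callback, while a call touching several
-# entities passes one in to gather them and run check_protection():
-#
-#   @protected()
-#   def get_user(self, context, user_id):
-#       ...
-#
-#   @protected(callback=_check_grant_protection)
-#   def create_grant(self, context, **kwargs):
-#       ...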
-
-
-def filterprotected(*filters, **callback):
- """Wraps API list calls with role based access controls (RBAC).
-
- This handles both the protection of the API parameters as well as any
- filters supplied.
-
- More complex API list calls (for example that need to examine the contents
- of an entity referenced by one of the filters) should pass in a callback
- function, that will be subsequently called to check protection for these
- multiple entities. This callback function should gather the appropriate
- entities needed and then call check_protection() in the V3Controller class.
-
- """
- def _filterprotected(f):
- @functools.wraps(f)
- def wrapper(self, context, **kwargs):
- if not context['is_admin']:
- # The target dict for the policy check will include:
- #
- # - Any query filter parameters
- # - Data from the main url (which will be in the kwargs
- # parameter); although most of our APIs do not utilize
- # this, in theory you could have it.
- #
-
- # First build the dict of filter parameters
- target = dict()
- if filters:
- for item in filters:
- if item in context['query_string']:
- target[item] = context['query_string'][item]
-
- LOG.debug('RBAC: Adding query filter params (%s)', (
- ', '.join(['%s=%s' % (item, target[item])
- for item in target])))
-
- if 'callback' in callback and callback['callback'] is not None:
- # A callback has been specified to load additional target
- # data, so pass it the formal url params as well as the
- # list of filters, so it can augment these and then call
- # the check_protection() method.
- prep_info = {'f_name': f.__name__,
- 'input_attr': kwargs,
- 'filter_attr': target}
- callback['callback'](self, context, prep_info, **kwargs)
- else:
- # No callback, so we are going to check the protection here
- action = 'identity:%s' % f.__name__
- creds = _build_policy_check_credentials(self, action,
- context, kwargs)
- # Add in any formal url parameters
- for key in kwargs:
- target[key] = kwargs[key]
-
- self.policy_api.enforce(creds,
- action,
- utils.flatten_dict(target))
-
- LOG.debug('RBAC: Authorization granted')
- else:
- LOG.warning(_LW('RBAC: Bypassing authorization'))
- return f(self, context, filters, **kwargs)
- return wrapper
- return _filterprotected
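-# Usage sketch (filter names are illustrative): the listed query filters
-# are policy-checked and then handed to the decorated method as its
-# `filters` argument:
-#
-#   @filterprotected('domain_id', 'enabled', 'name')
-#   def list_users(self, context, filters):
-#       ...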
-
-
-class V2Controller(wsgi.Application):
- """Base controller class for Identity API v2."""
-
- def _normalize_domain_id(self, context, ref):
- """Fill in domain_id since v2 calls are not domain-aware.
-
- This will overwrite any domain_id that was inadvertently
- specified in the v2 call.
-
- """
- ref['domain_id'] = CONF.identity.default_domain_id
- return ref
-
- @staticmethod
- def filter_domain_id(ref):
- """Remove domain_id since v2 calls are not domain-aware."""
- ref.pop('domain_id', None)
- return ref
-
- @staticmethod
- def filter_domain(ref):
- """Remove domain since v2 calls are not domain-aware."""
- ref.pop('domain', None)
- return ref
-
- @staticmethod
- def filter_project_parent_id(ref):
- """Remove parent_id since v2 calls are not hierarchy-aware."""
- ref.pop('parent_id', None)
- return ref
-
- @staticmethod
- def filter_is_domain(ref):
- """Remove is_domain field since v2 calls are not domain-aware."""
- ref.pop('is_domain', None)
- return ref
-
- @staticmethod
- def normalize_username_in_response(ref):
- """Adds username to outgoing user refs to match the v2 spec.
-
- Internally we use `name` to represent a user's name. The v2 spec
- requires the use of `username` instead.
-
- """
- if 'username' not in ref and 'name' in ref:
- ref['username'] = ref['name']
- return ref
-
- @staticmethod
- def normalize_username_in_request(ref):
- """Adds name in incoming user refs to match the v2 spec.
-
- Internally we use `name` to represent a user's name. The v2 spec
- requires the use of `username` instead.
-
- """
- if 'name' not in ref and 'username' in ref:
- ref['name'] = ref.pop('username')
- return ref
-
- @staticmethod
- def v3_to_v2_user(ref):
- """Convert a user_ref from v3 to v2 compatible.
-
- * v2.0 users are not domain aware, and should have domain_id removed
- * v2.0 users expect the use of tenantId instead of default_project_id
- * v2.0 users have a username attribute
-
- If ref is a list type, we will iterate through each element and do the
- conversion.
- """
- def _format_default_project_id(ref):
- """Convert default_project_id to tenantId for v2 calls."""
- default_project_id = ref.pop('default_project_id', None)
- if default_project_id is not None:
- ref['tenantId'] = default_project_id
- elif 'tenantId' in ref:
- # NOTE(morganfainberg): To avoid v2.0 confusion if somehow a
- # tenantId property sneaks its way into the extra blob on the
- # user, we remove it here. If default_project_id is set, we
- # would override it in either case.
- del ref['tenantId']
-
- def _normalize_and_filter_user_properties(ref):
- """Run through the various filter/normalization methods."""
- _format_default_project_id(ref)
- V2Controller.filter_domain(ref)
- V2Controller.filter_domain_id(ref)
- V2Controller.normalize_username_in_response(ref)
- return ref
-
- if isinstance(ref, dict):
- return _normalize_and_filter_user_properties(ref)
- elif isinstance(ref, list):
- return [_normalize_and_filter_user_properties(x) for x in ref]
- else:
- raise ValueError(_('Expected dict or list: %s') % type(ref))
-
- @staticmethod
- def v3_to_v2_project(ref):
- """Convert a project_ref from v3 to v2.
-
- * v2.0 projects are not domain aware, and should have domain_id removed
- * v2.0 projects are not hierarchy aware, and should have parent_id
- removed
-
- This method should only be applied to project_refs being returned from
- the v2.0 controller(s).
-
- If ref is a list type, we will iterate through each element and do the
- conversion.
- """
- def _filter_project_properties(ref):
- """Run through the various filter methods."""
- V2Controller.filter_domain_id(ref)
- V2Controller.filter_project_parent_id(ref)
- V2Controller.filter_is_domain(ref)
- return ref
-
- if isinstance(ref, dict):
- return _filter_project_properties(ref)
- elif isinstance(ref, list):
- return [_filter_project_properties(x) for x in ref]
- else:
- raise ValueError(_('Expected dict or list: %s') % type(ref))
-
- def format_project_list(self, tenant_refs, **kwargs):
- """Format a v2 style project list, including marker/limits."""
- marker = kwargs.get('marker')
- first_index = 0
- if marker is not None:
- for (marker_index, tenant) in enumerate(tenant_refs):
- if tenant['id'] == marker:
- # we start pagination after the marker
- first_index = marker_index + 1
- break
- else:
- msg = _('Marker could not be found')
- raise exception.ValidationError(message=msg)
-
- limit = kwargs.get('limit')
- last_index = None
- if limit is not None:
- try:
- limit = int(limit)
- if limit < 0:
- raise AssertionError()
- except (ValueError, AssertionError):
- msg = _('Invalid limit value')
- raise exception.ValidationError(message=msg)
- last_index = first_index + limit
-
- tenant_refs = tenant_refs[first_index:last_index]
-
- for x in tenant_refs:
- if 'enabled' not in x:
- x['enabled'] = True
- o = {'tenants': tenant_refs,
- 'tenants_links': []}
- return o
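- # For example, given tenants [A, B, C, D], a request with marker=B and
- # limit=1 returns just [C]: pagination starts after the marker entry.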
-
-
-@dependency.requires('policy_api', 'token_provider_api')
-class V3Controller(wsgi.Application):
- """Base controller class for Identity API v3.
-
- Child classes should set the ``collection_name`` and ``member_name`` class
- attributes, representing the collection of entities they are exposing to
- the API. This is required for supporting self-referential links,
- pagination, etc.
-
- Class parameters:
-
- * `_public_parameters` - set of parameters that are exposed to the user.
- Usually used by cls.filter_params()
-
- """
-
- collection_name = 'entities'
- member_name = 'entity'
- get_member_from_driver = None
-
- @classmethod
- def base_url(cls, context, path=None):
- endpoint = super(V3Controller, cls).base_url(context, 'public')
- if not path:
- path = cls.collection_name
-
- return '%s/%s/%s' % (endpoint, 'v3', path.lstrip('/'))
-
- def get_auth_context(self, context):
- # TODO(dolphm): this method of accessing the auth context is terrible,
- # but context needs to be refactored to always have reasonable values.
- env_context = context.get('environment', {})
- return env_context.get(authorization.AUTH_CONTEXT_ENV, {})
-
- @classmethod
- def full_url(cls, context, path=None):
- url = cls.base_url(context, path)
- if context['environment'].get('QUERY_STRING'):
- url = '%s?%s' % (url, context['environment']['QUERY_STRING'])
-
- return url
-
- @classmethod
- def query_filter_is_true(cls, filter_value):
- """Determine if bool query param is 'True'.
-
- We treat this the same way as we do for policy
- enforcement:
-
- {bool_param}=0 is treated as False
-
- Any other value is considered to be equivalent to
- True, including the absence of a value
-
- """
- if (isinstance(filter_value, six.string_types) and
- filter_value == '0'):
- val = False
- else:
- val = True
- return val
-
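A short sketch of the rule above: only the string '0' is treated as False.

    from keystone.common.controller import V3Controller

    V3Controller.query_filter_is_true('0')      # False
    V3Controller.query_filter_is_true('1')      # True
    V3Controller.query_filter_is_true('false')  # True (still not '0')
    V3Controller.query_filter_is_true(None)     # True (absence of a value)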
- @classmethod
- def _add_self_referential_link(cls, context, ref):
- ref.setdefault('links', {})
- ref['links']['self'] = cls.base_url(context) + '/' + ref['id']
-
- @classmethod
- def wrap_member(cls, context, ref):
- cls._add_self_referential_link(context, ref)
- return {cls.member_name: ref}
-
- @classmethod
- def wrap_collection(cls, context, refs, hints=None):
- """Wrap a collection, checking for filtering and pagination.
-
- Returns the wrapped collection, which includes:
- - Executing any filtering not already carried out
- - Truncate to a set limit if necessary
- - Adds 'self' links in every member
- - Adds 'next', 'self' and 'prev' links for the whole collection.
-
- :param context: the current context, containing the original url path
- and query string
- :param refs: the list of members of the collection
- :param hints: list hints, containing any relevant filters and limit.
- Any filters already satisfied by managers will have been
- removed
- """
- # Check if there are any filters in hints that were not
- # handled by the drivers. The driver will not have paginated or
- # limited the output if it found there were filters it was unable to
- # handle.
-
- if hints is not None:
- refs = cls.filter_by_attributes(refs, hints)
-
- list_limited, refs = cls.limit(refs, hints)
-
- for ref in refs:
- cls.wrap_member(context, ref)
-
- container = {cls.collection_name: refs}
- container['links'] = {
- 'next': None,
- 'self': cls.full_url(context, path=context['path']),
- 'previous': None}
-
- if list_limited:
- container['truncated'] = True
-
- return container
-
- @classmethod
- def limit(cls, refs, hints):
- """Limits a list of entities.
-
- The underlying driver layer may have already truncated the collection
- for us, but in case it was unable to handle truncation we check here.
-
- :param refs: the list of members of the collection
- :param hints: hints, containing, among other things, the limit
- requested
-
- :returns: boolean indicating whether the list was truncated, as well
- as the list of (truncated if necessary) entities.
-
- """
- NOT_LIMITED = False
- LIMITED = True
-
- if hints is None or hints.limit is None:
- # No truncation was requested
- return NOT_LIMITED, refs
-
- if hints.limit.get('truncated', False):
- # The driver did truncate the list
- return LIMITED, refs
-
- if len(refs) > hints.limit['limit']:
- # The driver layer wasn't able to truncate it for us, so we must
- # do it here
- return LIMITED, refs[:hints.limit['limit']]
-
- return NOT_LIMITED, refs
-
- @classmethod
- def filter_by_attributes(cls, refs, hints):
- """Filters a list of references by filter values."""
- def _attr_match(ref_attr, val_attr):
- """Matches attributes allowing for booleans as strings.
-
-            We test explicitly for a value that defines the attribute as
-            'False'; consequently, the existence of the attribute with no
-            value implies 'True'.
-
- """
- if type(ref_attr) is bool:
- return ref_attr == utils.attr_as_boolean(val_attr)
- else:
- return ref_attr == val_attr
-
- def _inexact_attr_match(filter, ref):
- """Applies an inexact filter to a result dict.
-
- :param filter: the filter in question
- :param ref: the dict to check
-
- :returns: True if there is a match
-
- """
- comparator = filter['comparator']
- key = filter['name']
-
- if key in ref:
- filter_value = filter['value']
- target_value = ref[key]
- if not filter['case_sensitive']:
- # We only support inexact filters on strings so
- # it's OK to use lower()
- filter_value = filter_value.lower()
- target_value = target_value.lower()
-
- if comparator == 'contains':
- return (filter_value in target_value)
- elif comparator == 'startswith':
- return target_value.startswith(filter_value)
- elif comparator == 'endswith':
- return target_value.endswith(filter_value)
- else:
- # We silently ignore unsupported filters
- return True
-
- return False
-
- for filter in hints.filters:
- if filter['comparator'] == 'equals':
- attr = filter['name']
- value = filter['value']
- refs = [r for r in refs if _attr_match(
- utils.flatten_dict(r).get(attr), value)]
- else:
- # It might be an inexact filter
- refs = [r for r in refs if _inexact_attr_match(
- filter, r)]
-
- return refs
-
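A hedged sketch combining an exact 'equals' filter with an inexact 'contains' filter; the refs are invented:

    from keystone.common import driver_hints

    refs = [{'name': 'alice', 'enabled': True},
            {'name': 'bob', 'enabled': False}]

    hints = driver_hints.Hints()
    hints.add_filter('enabled', 'true')  # string matched via attr_as_boolean
    hints.add_filter('name', 'ali', comparator='contains',
                     case_sensitive=True)

    V3Controller.filter_by_attributes(refs, hints)
    # -> [{'name': 'alice', 'enabled': True}]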
- @classmethod
- def build_driver_hints(cls, context, supported_filters):
- """Build list hints based on the context query string.
-
- :param context: contains the query_string from which any list hints can
- be extracted
- :param supported_filters: list of filters supported, so ignore any
- keys in query_dict that are not in this list.
-
- """
- query_dict = context['query_string']
- hints = driver_hints.Hints()
-
- if query_dict is None:
- return hints
-
- for key in query_dict:
- # Check if this is an exact filter
- if supported_filters is None or key in supported_filters:
- hints.add_filter(key, query_dict[key])
- continue
-
- # Check if it is an inexact filter
- for valid_key in supported_filters:
- # See if this entry in query_dict matches a known key with an
- # inexact suffix added. If it doesn't match, then that just
- # means that there is no inexact filter for that key in this
- # query.
- if not key.startswith(valid_key + '__'):
- continue
-
- base_key, comparator = key.split('__', 1)
-
-                # We map a query-style inexact filter of, for example:
-                #
-                # {'email__contains': 'myISP'}
-                #
-                # into an add_filter() call with the parameters:
- #
- # name = 'email'
- # value = 'myISP'
- # comparator = 'contains'
- # case_sensitive = True
-
- case_sensitive = True
- if comparator.startswith('i'):
- case_sensitive = False
- comparator = comparator[1:]
- hints.add_filter(base_key, query_dict[key],
- comparator=comparator,
- case_sensitive=case_sensitive)
-
- # NOTE(henry-nash): If we were to support pagination, we would pull any
- # pagination directives out of the query_dict here, and add them into
- # the hints list.
- return hints
-
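A sketch of the inexact-suffix parsing described in the comment above; the context is reduced to the single key this method reads:

    context = {'query_string': {'name__icontains': 'ISP'}}
    hints = V3Controller.build_driver_hints(context, ['name'])
    # hints.filters == [{'name': 'name', 'value': 'ISP',
    #                    'comparator': 'contains',
    #                    'case_sensitive': False, 'type': 'filter'}]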
- def _require_matching_id(self, value, ref):
- """Ensures the value matches the reference's ID, if any."""
- if 'id' in ref and ref['id'] != value:
- raise exception.ValidationError('Cannot change ID')
-
- def _require_matching_domain_id(self, ref_id, ref, get_member):
- """Ensure the current domain ID matches the reference one, if any.
-
- Provided we want domain IDs to be immutable, check whether any
- domain_id specified in the ref dictionary matches the existing
- domain_id for this entity.
-
- :param ref_id: the ID of the entity
- :param ref: the dictionary of new values proposed for this entity
- :param get_member: The member function to call to get the current
- entity
- :raises: :class:`keystone.exception.ValidationError`
-
- """
- # TODO(henry-nash): It might be safer and more efficient to do this
- # check in the managers affected, so look to migrate this check to
- # there in the future.
- if CONF.domain_id_immutable and 'domain_id' in ref:
- existing_ref = get_member(ref_id)
- if ref['domain_id'] != existing_ref['domain_id']:
- raise exception.ValidationError(_('Cannot change Domain ID'))
-
- def _assign_unique_id(self, ref):
- """Generates and assigns a unique identifier to a reference."""
- ref = ref.copy()
- ref['id'] = uuid.uuid4().hex
- return ref
-
- def _get_domain_id_for_list_request(self, context):
- """Get the domain_id for a v3 list call.
-
-        If we are running with multiple domain drivers, then the caller must
- specify a domain_id either as a filter or as part of the token scope.
-
- """
- if not CONF.identity.domain_specific_drivers_enabled:
- # We don't need to specify a domain ID in this case
- return
-
- if context['query_string'].get('domain_id') is not None:
- return context['query_string'].get('domain_id')
-
- token_ref = utils.get_token_ref(context)
-
- if token_ref.domain_scoped:
- return token_ref.domain_id
- elif token_ref.project_scoped:
- return token_ref.project_domain_id
- else:
- LOG.warning(
- _LW('No domain information specified as part of list request'))
- raise exception.Unauthorized()
-
- def _get_domain_id_from_token(self, context):
- """Get the domain_id for a v3 create call.
-
- In the case of a v3 create entity call that does not specify a domain
- ID, the spec says that we should use the domain scoping from the token
- being used.
-
- """
- try:
- token_ref = utils.get_token_ref(context)
- except exception.Unauthorized:
- if context.get('is_admin'):
- raise exception.ValidationError(
- _('You have tried to create a resource using the admin '
- 'token. As this token is not within a domain you must '
- 'explicitly include a domain for this resource to '
- 'belong to.'))
- raise
-
- if token_ref.domain_scoped:
- return token_ref.domain_id
- else:
- # TODO(henry-nash): We should issue an exception here since if
- # a v3 call does not explicitly specify the domain_id in the
- # entity, it should be using a domain scoped token. However,
- # the current tempest heat tests issue a v3 call without this.
- # This is raised as bug #1283539. Once this is fixed, we
- # should remove the line below and replace it with an error.
- #
- # Ahead of actually changing the code to raise an exception, we
- # issue a deprecation warning.
- versionutils.report_deprecated_feature(
- LOG,
- _LW('Not specifying a domain during a create user, group or '
- 'project call, and relying on falling back to the '
- 'default domain, is deprecated as of Liberty and will be '
- 'removed in the N release. Specify the domain explicitly '
- 'or use a domain-scoped token'))
- return CONF.identity.default_domain_id
-
- def _normalize_domain_id(self, context, ref):
- """Fill in domain_id if not specified in a v3 call."""
- if not ref.get('domain_id'):
- ref['domain_id'] = self._get_domain_id_from_token(context)
- return ref
-
- @staticmethod
- def filter_domain_id(ref):
- """Override v2 filter to let domain_id out for v3 calls."""
- return ref
-
- def check_protection(self, context, prep_info, target_attr=None):
- """Provide call protection for complex target attributes.
-
- As well as including the standard parameters from the original API
- call (which is passed in prep_info), this call will add in any
- additional entities or attributes (passed in target_attr), so that
- they can be referenced by policy rules.
-
- """
- if 'is_admin' in context and context['is_admin']:
- LOG.warning(_LW('RBAC: Bypassing authorization'))
- else:
- action = 'identity:%s' % prep_info['f_name']
- # TODO(henry-nash) need to log the target attributes as well
- creds = _build_policy_check_credentials(self, action,
- context,
- prep_info['input_attr'])
- # Build the dict the policy engine will check against from both the
- # parameters passed into the call we are protecting (which was
- # stored in the prep_info by protected()), plus the target
- # attributes provided.
- policy_dict = {}
- if target_attr:
- policy_dict = {'target': target_attr}
- policy_dict.update(prep_info['input_attr'])
- if 'filter_attr' in prep_info:
- policy_dict.update(prep_info['filter_attr'])
- self.policy_api.enforce(creds,
- action,
- utils.flatten_dict(policy_dict))
- LOG.debug('RBAC: Authorization granted')
-
- @classmethod
- def filter_params(cls, ref):
- """Remove unspecified parameters from the dictionary.
-
- This function removes unspecified parameters from the dictionary.
- This method checks only root-level keys from a ref dictionary.
-
- :param ref: a dictionary representing deserialized response to be
- serialized
- """
- ref_keys = set(ref.keys())
- blocked_keys = ref_keys - cls._public_parameters
- for blocked_param in blocked_keys:
- del ref[blocked_param]
- return ref
diff --git a/keystone-moon/keystone/common/dependency.py b/keystone-moon/keystone/common/dependency.py
deleted file mode 100644
index d52a1ec5..00000000
--- a/keystone-moon/keystone/common/dependency.py
+++ /dev/null
@@ -1,230 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""This module provides support for dependency injection.
-
-Providers are registered via the ``@provider()`` decorator, and dependencies on
-them are registered with ``@requires()``. Providers are available to their
-consumers via an attribute. See the documentation for the individual functions
-for more detail.
-
-See also:
-
- https://en.wikipedia.org/wiki/Dependency_injection
-
-"""
-
-import traceback
-
-from keystone.i18n import _
-
-
-_REGISTRY = {}
-
-_future_dependencies = {}
-_factories = {}
-
-
-def _set_provider(name, provider):
- _original_provider, where_registered = _REGISTRY.get(name, (None, None))
- if where_registered:
- raise Exception('%s already has a registered provider, at\n%s' %
- (name, ''.join(where_registered)))
- _REGISTRY[name] = (provider, traceback.format_stack())
-
-
-GET_REQUIRED = object()
-GET_OPTIONAL = object()
-
-
-def get_provider(name, optional=GET_REQUIRED):
- if optional is GET_REQUIRED:
- return _REGISTRY[name][0]
- return _REGISTRY.get(name, (None, None))[0]
-
-
-class UnresolvableDependencyException(Exception):
- """Raised when a required dependency is not resolvable.
-
- See ``resolve_future_dependencies()`` for more details.
-
- """
-
- def __init__(self, name, targets):
- msg = _('Unregistered dependency: %(name)s for %(targets)s') % {
- 'name': name, 'targets': targets}
- super(UnresolvableDependencyException, self).__init__(msg)
-
-
-def provider(name):
- """A class decorator used to register providers.
-
-    When ``@provider()`` is used to decorate a class, instances of that class
-    will register themselves as providers for the named dependency. For
-    example, in the code fragment::
-
- @dependency.provider('foo_api')
- class Foo:
- def __init__(self):
- ...
-
- ...
-
- foo = Foo()
-
-    The object ``foo`` will be registered as a provider for ``foo_api``. No
-    more than one such instance may be created; attempting to register a
-    second provider for the same name raises an exception.
-
- """
- def wrapper(cls):
- def wrapped(init):
- def __wrapped_init__(self, *args, **kwargs):
- """Initialize the wrapped object and add it to the registry."""
- init(self, *args, **kwargs)
- _set_provider(name, self)
- resolve_future_dependencies(__provider_name=name)
-
- return __wrapped_init__
-
- cls.__init__ = wrapped(cls.__init__)
- _factories[name] = cls
- return cls
- return wrapper
-
-
-def _process_dependencies(obj):
- # Any dependencies that can be resolved immediately are resolved.
- # Dependencies that cannot be resolved immediately are stored for
- # resolution in resolve_future_dependencies.
-
- def process(obj, attr_name, unresolved_in_out):
- for dependency in getattr(obj, attr_name, []):
- if dependency not in _REGISTRY:
- # We don't know about this dependency, so save it for later.
- unresolved_in_out.setdefault(dependency, []).append(obj)
- continue
-
- setattr(obj, dependency, get_provider(dependency))
-
- process(obj, '_dependencies', _future_dependencies)
-
-
-def requires(*dependencies):
- """A class decorator used to inject providers into consumers.
-
- The required providers will be made available to instances of the decorated
- class via an attribute with the same name as the provider. For example, in
- the code fragment::
-
- @dependency.requires('foo_api', 'bar_api')
- class FooBarClient:
- def __init__(self):
- ...
-
- ...
-
- client = FooBarClient()
-
- The object ``client`` will have attributes named ``foo_api`` and
- ``bar_api``, which are instances of the named providers.
-
- Objects must not rely on the existence of these attributes until after
- ``resolve_future_dependencies()`` has been called; they may not exist
- beforehand.
-
-    Dependencies registered via ``@requires()`` must have providers; if not,
- an ``UnresolvableDependencyException`` will be raised when
- ``resolve_future_dependencies()`` is called.
-
- """
- def wrapper(self, *args, **kwargs):
- """Inject each dependency from the registry."""
- self.__wrapped_init__(*args, **kwargs)
- _process_dependencies(self)
-
- def wrapped(cls):
- """Note the required dependencies on the object for later injection.
-
- The dependencies of the parent class are combined with that of the
- child class to create a new set of dependencies.
-
- """
- existing_dependencies = getattr(cls, '_dependencies', set())
- cls._dependencies = existing_dependencies.union(dependencies)
- if not hasattr(cls, '__wrapped_init__'):
- cls.__wrapped_init__ = cls.__init__
- cls.__init__ = wrapper
- return cls
-
- return wrapped
-
-
-def resolve_future_dependencies(__provider_name=None):
- """Forces injection of all dependencies.
-
- Before this function is called, circular dependencies may not have been
- injected. This function should be called only once, after all global
- providers are registered. If an object needs to be created after this
- call, it must not have circular dependencies.
-
- If any required dependencies are unresolvable, this function will raise an
- ``UnresolvableDependencyException``.
-
- Outside of this module, this function should be called with no arguments;
- the optional argument, ``__provider_name`` is used internally, and should
- be treated as an implementation detail.
-
- """
- new_providers = dict()
- if __provider_name:
- # A provider was registered, so take care of any objects depending on
- # it.
- targets = _future_dependencies.pop(__provider_name, [])
-
- for target in targets:
- setattr(target, __provider_name, get_provider(__provider_name))
-
- return
-
- # Resolve future dependencies, raises UnresolvableDependencyException if
- # there's no provider registered.
- try:
- for dependency, targets in _future_dependencies.copy().items():
- if dependency not in _REGISTRY:
-                # A class was registered that could fulfill the dependency,
-                # but it has not yet been instantiated.
- factory = _factories.get(dependency)
- if factory:
- provider = factory()
- new_providers[dependency] = provider
- else:
- raise UnresolvableDependencyException(dependency, targets)
-
- for target in targets:
- setattr(target, dependency, get_provider(dependency))
- finally:
- _future_dependencies.clear()
- return new_providers
-
-
-def reset():
- """Reset the registry of providers.
-
- This is useful for unit testing to ensure that tests don't use providers
- from previous tests.
- """
- _REGISTRY.clear()
- _future_dependencies.clear()
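An end-to-end sketch of the registry, using hypothetical provider and consumer names:

    from keystone.common import dependency

    @dependency.provider('greeter_api')
    class Greeter(object):
        def greet(self):
            return 'hello'

    @dependency.requires('greeter_api')
    class Consumer(object):
        pass

    consumer = Consumer()   # 'greeter_api' not yet resolvable; deferred
    Greeter()               # instantiating the provider resolves it
    assert consumer.greeter_api.greet() == 'hello'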
diff --git a/keystone-moon/keystone/common/driver_hints.py b/keystone-moon/keystone/common/driver_hints.py
deleted file mode 100644
index e7c2f2ef..00000000
--- a/keystone-moon/keystone/common/driver_hints.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import functools
-
-from keystone import exception
-from keystone.i18n import _
-
-
-def truncated(f):
- """Ensure list truncation is detected in Driver list entity methods.
-
-    This is designed to wrap Driver list_{entity} methods in order to
-    determine whether the resultant list has been truncated. If a limit dict
-    is found in the hints list, we increment the limit by one and ask the
-    wrapped function for one more entity than the limit. Once the list has
-    been generated, we check whether the original limit was exceeded; if so,
-    we truncate back to that limit and set the 'truncated' boolean to 'true'
-    in the hints limit dict.
-
- """
- @functools.wraps(f)
- def wrapper(self, hints, *args, **kwargs):
- if not hasattr(hints, 'limit'):
- raise exception.UnexpectedError(
-                _('Cannot truncate a driver call without a hints list as '
-                  'the first parameter after self'))
-
- if hints.limit is None:
- return f(self, hints, *args, **kwargs)
-
- # A limit is set, so ask for one more entry than we need
- list_limit = hints.limit['limit']
- hints.set_limit(list_limit + 1)
- ref_list = f(self, hints, *args, **kwargs)
-
- # If we got more than the original limit then trim back the list and
- # mark it truncated. In both cases, make sure we set the limit back
- # to its original value.
- if len(ref_list) > list_limit:
- hints.set_limit(list_limit, truncated=True)
- return ref_list[:list_limit]
- else:
- hints.set_limit(list_limit)
- return ref_list
- return wrapper
-
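A hedged sketch of the decorator in action, using a fake driver that ignores hints and always returns five entities:

    from keystone.common.driver_hints import Hints, truncated

    class FakeDriver(object):
        @truncated
        def list_widgets(self, hints):
            # Pretend backend: ignores filters, returns five entities.
            return [{'id': i} for i in range(5)]

    hints = Hints()
    hints.set_limit(3)
    result = FakeDriver().list_widgets(hints)
    assert len(result) == 3
    assert hints.limit['truncated'] is True  # the limit+1 probe overflowed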
-
-class Hints(object):
- """Encapsulate driver hints for listing entities.
-
- Hints are modifiers that affect the return of entities from a
- list_<entities> operation. They are typically passed to a driver to give
- direction as to what filtering, pagination or list limiting actions are
- being requested.
-
-    It is optional for a driver to act on some or all of the list hints,
-    but any filters that it does satisfy must be marked as such by
-    removing the filter from the list.
-
-    A Hints object contains filters, a list of dicts that can be accessed
-    publicly. It also contains a dict called limit, which indicates the
-    amount of data we want to limit our listing to.
-
- If the filter is discovered to never match, then `cannot_match` can be set
- to indicate that there will not be any matches and the backend work can be
- short-circuited.
-
- Each filter term consists of:
-
- * ``name``: the name of the attribute being matched
- * ``value``: the value against which it is being matched
- * ``comparator``: the operation, which can be one of ``equals``,
- ``contains``, ``startswith`` or ``endswith``
- * ``case_sensitive``: whether any comparison should take account of
- case
- * ``type``: will always be 'filter'
-
- """
-
- def __init__(self):
- self.limit = None
- self.filters = list()
- self.cannot_match = False
-
- def add_filter(self, name, value, comparator='equals',
- case_sensitive=False):
- """Adds a filter to the filters list, which is publicly accessible."""
- self.filters.append({'name': name, 'value': value,
- 'comparator': comparator,
- 'case_sensitive': case_sensitive,
- 'type': 'filter'})
-
- def get_exact_filter_by_name(self, name):
- """Return a filter key and value if exact filter exists for name."""
- for entry in self.filters:
- if (entry['type'] == 'filter' and entry['name'] == name and
- entry['comparator'] == 'equals'):
- return entry
-
- def set_limit(self, limit, truncated=False):
- """Set a limit to indicate the list should be truncated."""
- self.limit = {'limit': limit, 'type': 'limit', 'truncated': truncated}
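A short sketch of the public filter list described in the class docstring:

    from keystone.common.driver_hints import Hints

    hints = Hints()
    hints.add_filter('name', 'alice')  # comparator defaults to 'equals'
    hints.get_exact_filter_by_name('name')
    # -> {'name': 'name', 'value': 'alice', 'comparator': 'equals',
    #     'case_sensitive': False, 'type': 'filter'}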
diff --git a/keystone-moon/keystone/common/environment/__init__.py b/keystone-moon/keystone/common/environment/__init__.py
deleted file mode 100644
index 6748f115..00000000
--- a/keystone-moon/keystone/common/environment/__init__.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import functools
-import os
-
-from oslo_log import log
-
-
-LOG = log.getLogger(__name__)
-
-
-__all__ = ('Server', 'httplib', 'subprocess')
-
-_configured = False
-
-Server = None
-httplib = None
-subprocess = None
-
-
-def configure_once(name):
- """Ensure that environment configuration is only run once.
-
-    If the environment is reconfigured in the same way, the call is ignored.
-    It is an error to attempt to reconfigure the environment in a different
-    way.
- """
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- global _configured
- if _configured:
- if _configured == name:
- return
- else:
- raise SystemError("Environment has already been "
- "configured as %s" % _configured)
-
- LOG.debug("Environment configured as: %s", name)
- _configured = name
- return func(*args, **kwargs)
-
- return wrapper
- return decorator
-
-
-@configure_once('eventlet')
-def use_eventlet(monkeypatch_thread=None):
- global httplib, subprocess, Server
-
- # This must be set before the initial import of eventlet because if
- # dnspython is present in your environment then eventlet monkeypatches
- # socket.getaddrinfo() with an implementation which doesn't work for IPv6.
- os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
-
- import eventlet
- from eventlet.green import httplib as _httplib
- from eventlet.green import subprocess as _subprocess
-
- from keystone.common.environment import eventlet_server
-
- if monkeypatch_thread is None:
- monkeypatch_thread = not os.getenv('STANDARD_THREADS')
-
- # Raise the default from 8192 to accommodate large tokens
- eventlet.wsgi.MAX_HEADER_LINE = 16384
-
- # NOTE(ldbragst): Explicitly declare what should be monkey patched and
- # what shouldn't. Doing this allows for more readable code when
- # understanding Eventlet in Keystone. The following is a complete list
- # of what is monkey patched instead of passing all=False and then passing
- # module=True to monkey patch a specific module.
- eventlet.patcher.monkey_patch(os=False, select=True, socket=True,
- thread=monkeypatch_thread, time=True,
- psycopg=False, MySQLdb=False)
-
- Server = eventlet_server.Server
- httplib = _httplib
- subprocess = _subprocess
-
-
-@configure_once('stdlib')
-def use_stdlib():
- global httplib, subprocess
-
- import six.moves.http_client as _httplib
- import subprocess as _subprocess # nosec : This is used in .federation.idp
- # and .common.openssl. See there.
-
- httplib = _httplib
- subprocess = _subprocess
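A sketch of the one-shot contract enforced by configure_once():

    from keystone.common import environment

    environment.use_stdlib()
    environment.use_stdlib()    # same name: silently ignored
    environment.use_eventlet()  # different name: raises SystemError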
diff --git a/keystone-moon/keystone/common/environment/eventlet_server.py b/keystone-moon/keystone/common/environment/eventlet_server.py
deleted file mode 100644
index 430ca3e4..00000000
--- a/keystone-moon/keystone/common/environment/eventlet_server.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# Copyright 2010 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import errno
-import re
-import socket
-import ssl
-import sys
-
-import eventlet
-import eventlet.wsgi
-import greenlet
-from oslo_config import cfg
-from oslo_log import log
-from oslo_service import service
-
-from keystone.i18n import _LE, _LI
-
-
-CONF = cfg.CONF
-
-
-LOG = log.getLogger(__name__)
-
-# The size of a pool that is used to spawn a single green thread in which
-# a wsgi server is then started. The size of one is enough, because in case
-# of several workers the parent process forks and each child gets a copy
-# of a pool, which does not include any greenthread object as the spawn is
-# done after the fork.
-POOL_SIZE = 1
-
-
-class EventletFilteringLogger(object):
- # NOTE(morganfainberg): This logger is designed to filter out specific
- # Tracebacks to limit the amount of data that eventlet can log. In the
- # case of broken sockets (EPIPE and ECONNRESET), we are seeing a huge
- # volume of data being written to the logs due to ~14 lines+ per traceback.
-    # The tracebacks in these cases are, at best, useful for limited
-    # debugging cases.
- def __init__(self, logger, level=log.INFO):
- self.logger = logger
- self.level = level
- self.regex = re.compile(r'errno (%d|%d)' %
- (errno.EPIPE, errno.ECONNRESET), re.IGNORECASE)
-
- def write(self, msg):
- m = self.regex.search(msg)
- if m:
- self.logger.log(log.logging.DEBUG, 'Error(%s) writing to socket.',
- m.group(1))
- else:
- self.logger.log(self.level, msg.rstrip())
-
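A sketch of the filtering behaviour; the messages are invented, and the errno values assume a Linux platform (EPIPE=32, ECONNRESET=104):

    logger = log.getLogger('eventlet.wsgi.server')
    filtering = EventletFilteringLogger(logger)

    filtering.write('Traceback ... errno 32 Broken pipe')  # demoted to DEBUG
    filtering.write('GET /v3/projects 200')                # logged at INFO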
-
-class Server(service.ServiceBase):
- """Server class to manage multiple WSGI sockets and applications."""
-
- def __init__(self, application, host=None, port=None, keepalive=False,
- keepidle=None):
- self.application = application
- self.host = host or '0.0.0.0' # nosec : Bind to all interfaces by
- # default for backwards compatibility.
- self.port = port or 0
- # Pool for a green thread in which wsgi server will be running
- self.pool = eventlet.GreenPool(POOL_SIZE)
- self.socket_info = {}
- self.greenthread = None
- self.do_ssl = False
- self.cert_required = False
- self.keepalive = keepalive
- self.keepidle = keepidle
- self.socket = None
-
- def listen(self, key=None, backlog=128):
- """Create and start listening on socket.
-
- Call before forking worker processes.
-
- Raises Exception if this has already been called.
- """
- # TODO(dims): eventlet's green dns/socket module does not actually
- # support IPv6 in getaddrinfo(). We need to get around this in the
- # future or monitor upstream for a fix.
- # Please refer below link
- # (https://bitbucket.org/eventlet/eventlet/
- # src/e0f578180d7d82d2ed3d8a96d520103503c524ec/eventlet/support/
- # greendns.py?at=0.12#cl-163)
- info = socket.getaddrinfo(self.host,
- self.port,
- socket.AF_UNSPEC,
- socket.SOCK_STREAM)[0]
-
- try:
- self.socket = eventlet.listen(info[-1], family=info[0],
- backlog=backlog)
- except EnvironmentError:
- LOG.error(_LE("Could not bind to %(host)s:%(port)s"),
- {'host': self.host, 'port': self.port})
- raise
-
- LOG.info(_LI('Starting %(arg0)s on %(host)s:%(port)s'),
- {'arg0': sys.argv[0],
- 'host': self.host,
- 'port': self.port})
-
- def start(self, key=None, backlog=128):
- """Run a WSGI server with the given application."""
- if self.socket is None:
- self.listen(key=key, backlog=backlog)
-
- dup_socket = self.socket.dup()
- if key:
- self.socket_info[key] = self.socket.getsockname()
- # SSL is enabled
- if self.do_ssl:
- if self.cert_required:
- cert_reqs = ssl.CERT_REQUIRED
- else:
- cert_reqs = ssl.CERT_NONE
-
- dup_socket = eventlet.wrap_ssl(dup_socket, certfile=self.certfile,
- keyfile=self.keyfile,
- server_side=True,
- cert_reqs=cert_reqs,
- ca_certs=self.ca_certs)
-
- # Optionally enable keepalive on the wsgi socket.
- if self.keepalive:
- dup_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-
- if self.keepidle is not None:
- if hasattr(socket, 'TCP_KEEPIDLE'):
- dup_socket.setsockopt(socket.IPPROTO_TCP,
- socket.TCP_KEEPIDLE,
- self.keepidle)
- else:
- LOG.warning("System does not support TCP_KEEPIDLE but "
- "tcp_keepidle has been set. Ignoring.")
-
- self.greenthread = self.pool.spawn(self._run,
- self.application,
- dup_socket)
-
- def set_ssl(self, certfile, keyfile=None, ca_certs=None,
- cert_required=True):
- self.certfile = certfile
- self.keyfile = keyfile
- self.ca_certs = ca_certs
- self.cert_required = cert_required
- self.do_ssl = True
-
- def stop(self):
- if self.greenthread is not None:
- self.greenthread.kill()
-
- def wait(self):
- """Wait until all servers have completed running."""
- try:
- self.pool.waitall()
- except KeyboardInterrupt: # nosec
- # If CTRL-C, just break out of the loop.
- pass
- except greenlet.GreenletExit: # nosec
- # If exiting, break out of the loop.
- pass
-
- def reset(self):
- """Required by the service interface.
-
- The service interface is used by the launcher when receiving a
- SIGHUP. The service interface is defined in
- oslo_service.service.Service.
-
- Keystone does not need to do anything here.
- """
- pass
-
- def _run(self, application, socket):
- """Start a WSGI server with a new green thread pool."""
- logger = log.getLogger('eventlet.wsgi.server')
-
- # NOTE(dolph): [eventlet_server] client_socket_timeout is required to
- # be an integer in keystone.conf, but in order to make
- # eventlet.wsgi.server() wait forever, we pass None instead of 0.
- socket_timeout = CONF.eventlet_server.client_socket_timeout or None
-
- try:
- eventlet.wsgi.server(
- socket, application, log=EventletFilteringLogger(logger),
- debug=False, keepalive=CONF.eventlet_server.wsgi_keep_alive,
- socket_timeout=socket_timeout)
- except greenlet.GreenletExit: # nosec
- # Wait until all servers have completed running
- pass
- except Exception:
- LOG.exception(_LE('Server error'))
- raise
diff --git a/keystone-moon/keystone/common/extension.py b/keystone-moon/keystone/common/extension.py
deleted file mode 100644
index be5de631..00000000
--- a/keystone-moon/keystone/common/extension.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-ADMIN_EXTENSIONS = {}
-PUBLIC_EXTENSIONS = {}
-
-
-def register_admin_extension(url_prefix, extension_data):
- """Register extension with collection of admin extensions.
-
- Extensions register the information here that will show
- up in the /extensions page as a way to indicate that the extension is
- active.
-
- url_prefix: unique key for the extension that will appear in the
-    URLs generated by the extension.
-
- extension_data is a dictionary. The expected fields are:
- 'name': short, human readable name of the extension
- 'namespace': xml namespace
- 'alias': identifier for the extension
- 'updated': date the extension was last updated
- 'description': text description of the extension
- 'links': hyperlinks to documents describing the extension
-
- """
- ADMIN_EXTENSIONS[url_prefix] = extension_data
-
-
-def register_public_extension(url_prefix, extension_data):
- """Same as register_admin_extension but for public extensions."""
- PUBLIC_EXTENSIONS[url_prefix] = extension_data
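A hedged registration example; the extension name, namespace and other metadata are made up:

    register_admin_extension(
        'OS-EXAMPLE',
        {'name': 'Example Extension',
         'namespace': 'http://docs.openstack.org/identity/api/ext/'
                      'OS-EXAMPLE/v1.0',
         'alias': 'OS-EXAMPLE',
         'updated': '2015-01-01T00:00:00Z',
         'description': 'Illustrative extension registration.',
         'links': []})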
diff --git a/keystone-moon/keystone/common/json_home.py b/keystone-moon/keystone/common/json_home.py
deleted file mode 100644
index 6876f8af..00000000
--- a/keystone-moon/keystone/common/json_home.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from keystone import exception
-from keystone.i18n import _
-
-
-def build_v3_resource_relation(resource_name):
- return ('http://docs.openstack.org/api/openstack-identity/3/rel/%s' %
- resource_name)
-
-
-def build_v3_extension_resource_relation(extension_name, extension_version,
- resource_name):
- return (
- 'http://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/rel/%s' %
- (extension_name, extension_version, resource_name))
-
-
-def build_v3_parameter_relation(parameter_name):
- return ('http://docs.openstack.org/api/openstack-identity/3/param/%s' %
- parameter_name)
-
-
-def build_v3_extension_parameter_relation(extension_name, extension_version,
- parameter_name):
- return (
- 'http://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/param/'
- '%s' % (extension_name, extension_version, parameter_name))
-
-
-class Parameters(object):
- """Relationships for Common parameters."""
-
- DOMAIN_ID = build_v3_parameter_relation('domain_id')
- ENDPOINT_ID = build_v3_parameter_relation('endpoint_id')
- GROUP_ID = build_v3_parameter_relation('group_id')
- POLICY_ID = build_v3_parameter_relation('policy_id')
- PROJECT_ID = build_v3_parameter_relation('project_id')
- REGION_ID = build_v3_parameter_relation('region_id')
- ROLE_ID = build_v3_parameter_relation('role_id')
- SERVICE_ID = build_v3_parameter_relation('service_id')
- USER_ID = build_v3_parameter_relation('user_id')
-
-
-class Status(object):
- """Status values supported."""
-
- DEPRECATED = 'deprecated'
- EXPERIMENTAL = 'experimental'
- STABLE = 'stable'
-
- @classmethod
- def update_resource_data(cls, resource_data, status):
- if status is cls.STABLE:
- # We currently do not add a status if the resource is stable, the
- # absence of the status property can be taken as meaning that the
- # resource is stable.
- return
- if status is cls.DEPRECATED or status is cls.EXPERIMENTAL:
- resource_data['hints'] = {'status': status}
- return
-
- raise exception.Error(message=_(
- 'Unexpected status requested for JSON Home response, %s') % status)
-
-
-def translate_urls(json_home, new_prefix):
- """Given a JSON Home document, sticks new_prefix on each of the urls."""
- for dummy_rel, resource in json_home['resources'].items():
- if 'href' in resource:
- resource['href'] = new_prefix + resource['href']
- elif 'href-template' in resource:
- resource['href-template'] = new_prefix + resource['href-template']
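A sketch of the relation builders and URL translation; the resource names are illustrative:

    build_v3_resource_relation('user')
    # -> 'http://docs.openstack.org/api/openstack-identity/3/rel/user'

    doc = {'resources': {'rel': {'href': '/users'}}}
    translate_urls(doc, '/identity')
    # doc['resources']['rel']['href'] is now '/identity/users'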
diff --git a/keystone-moon/keystone/common/kvs/__init__.py b/keystone-moon/keystone/common/kvs/__init__.py
deleted file mode 100644
index 354bbd8a..00000000
--- a/keystone-moon/keystone/common/kvs/__init__.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 2013 Metacloud, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from dogpile.cache import region
-
-from keystone.common.kvs.core import * # noqa
-
-
-# NOTE(morganfainberg): Provided backends are registered here in the __init__
-# for the kvs system. Any out-of-tree backends should be registered via the
-# ``backends`` option in the ``[kvs]`` section of the Keystone configuration
-# file.
-region.register_backend(
- 'openstack.kvs.Memory',
- 'keystone.common.kvs.backends.inmemdb',
- 'MemoryBackend')
-
-region.register_backend(
- 'openstack.kvs.Memcached',
- 'keystone.common.kvs.backends.memcached',
- 'MemcachedBackend')
diff --git a/keystone-moon/keystone/common/kvs/backends/__init__.py b/keystone-moon/keystone/common/kvs/backends/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/keystone-moon/keystone/common/kvs/backends/__init__.py
+++ /dev/null
diff --git a/keystone-moon/keystone/common/kvs/backends/inmemdb.py b/keystone-moon/keystone/common/kvs/backends/inmemdb.py
deleted file mode 100644
index 379b54bf..00000000
--- a/keystone-moon/keystone/common/kvs/backends/inmemdb.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2013 Metacloud, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Keystone In-Memory Dogpile.cache backend implementation."""
-
-import copy
-
-from dogpile.cache import api
-
-
-NO_VALUE = api.NO_VALUE
-
-
-class MemoryBackend(api.CacheBackend):
- """A backend that uses a plain dictionary.
-
- There is no size management, and values which are placed into the
- dictionary will remain until explicitly removed. Note that Dogpile's
- expiration of items is based on timestamps and does not remove them from
- the cache.
-
- E.g.::
-
- from dogpile.cache import make_region
-
- region = make_region().configure(
-            'openstack.kvs.Memory'
- )
- """
-
- def __init__(self, arguments):
- self._db = {}
-
- def _isolate_value(self, value):
- if value is not NO_VALUE:
- return copy.deepcopy(value)
- return value
-
- def get(self, key):
- return self._isolate_value(self._db.get(key, NO_VALUE))
-
- def get_multi(self, keys):
- return [self.get(key) for key in keys]
-
- def set(self, key, value):
- self._db[key] = self._isolate_value(value)
-
- def set_multi(self, mapping):
- for key, value in mapping.items():
- self.set(key, value)
-
- def delete(self, key):
- self._db.pop(key, None)
-
- def delete_multi(self, keys):
- for key in keys:
- self.delete(key)
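A sketch showing the deep-copy isolation provided by _isolate_value():

    backend = MemoryBackend({})
    value = {'nested': [1, 2]}
    backend.set('k', value)
    value['nested'].append(3)  # caller-side mutation...
    backend.get('k')           # ...is not visible: {'nested': [1, 2]}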
diff --git a/keystone-moon/keystone/common/kvs/backends/memcached.py b/keystone-moon/keystone/common/kvs/backends/memcached.py
deleted file mode 100644
index a65cf877..00000000
--- a/keystone-moon/keystone/common/kvs/backends/memcached.py
+++ /dev/null
@@ -1,195 +0,0 @@
-# Copyright 2013 Metacloud, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Keystone Memcached dogpile.cache backend implementation."""
-
-import random as _random
-import time
-
-from dogpile.cache import api
-from dogpile.cache.backends import memcached
-from oslo_cache.backends import memcache_pool
-from oslo_config import cfg
-from six.moves import range
-
-from keystone import exception
-from keystone.i18n import _
-
-
-CONF = cfg.CONF
-NO_VALUE = api.NO_VALUE
-random = _random.SystemRandom()
-
-VALID_DOGPILE_BACKENDS = dict(
- pylibmc=memcached.PylibmcBackend,
- bmemcached=memcached.BMemcachedBackend,
- memcached=memcached.MemcachedBackend,
- pooled_memcached=memcache_pool.PooledMemcachedBackend)
-
-
-class MemcachedLock(object):
- """Simple distributed lock using memcached.
-
- This is an adaptation of the lock featured at
- http://amix.dk/blog/post/19386
-
- """
-
- def __init__(self, client_fn, key, lock_timeout, max_lock_attempts):
- self.client_fn = client_fn
- self.key = "_lock" + key
- self.lock_timeout = lock_timeout
- self.max_lock_attempts = max_lock_attempts
-
- def acquire(self, wait=True):
- client = self.client_fn()
- for i in range(self.max_lock_attempts):
- if client.add(self.key, 1, self.lock_timeout):
- return True
- elif not wait:
- return False
- else:
- sleep_time = random.random() # nosec : random is not used for
- # crypto or security, it's just the time to delay between
- # retries.
- time.sleep(sleep_time)
- raise exception.UnexpectedError(
- _('Maximum lock attempts on %s occurred.') % self.key)
-
- def release(self):
- client = self.client_fn()
- client.delete(self.key)
-
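A hedged sketch with an in-memory stand-in for the memcached client; in real use, the client factory is supplied by the backend's get_mutex() below:

    class FakeClient(object):
        """Minimal stand-in implementing just add() and delete()."""
        def __init__(self):
            self.store = {}

        def add(self, key, value, timeout):
            # memcached add() only succeeds if the key is absent.
            if key in self.store:
                return False
            self.store[key] = value
            return True

        def delete(self, key):
            self.store.pop(key, None)

    client = FakeClient()
    lock = MemcachedLock(lambda: client, 'resource-1',
                         lock_timeout=30, max_lock_attempts=15)
    assert lock.acquire() is True
    assert lock.acquire(wait=False) is False  # already held
    lock.release()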
-
-class MemcachedBackend(object):
- """Pivot point to leverage the various dogpile.cache memcached backends.
-
- To specify a specific dogpile.cache memcached backend, pass the argument
- `memcached_backend` set to one of the provided memcached backends (at this
- time `memcached`, `bmemcached`, `pylibmc` and `pooled_memcached` are
- valid).
- """
-
- def __init__(self, arguments):
- self._key_mangler = None
- self.raw_no_expiry_keys = set(arguments.pop('no_expiry_keys', set()))
- self.no_expiry_hashed_keys = set()
-
- self.lock_timeout = arguments.pop('lock_timeout', None)
- self.max_lock_attempts = arguments.pop('max_lock_attempts', 15)
- # NOTE(morganfainberg): Remove distributed locking from the arguments
- # passed to the "real" backend if it exists.
- arguments.pop('distributed_lock', None)
- backend = arguments.pop('memcached_backend', None)
- if 'url' not in arguments:
- # FIXME(morganfainberg): Log deprecation warning for old-style
- # configuration once full dict_config style configuration for
- # KVS backends is supported. For now use the current memcache
- # section of the configuration.
- arguments['url'] = CONF.memcache.servers
-
- if backend is None:
- # NOTE(morganfainberg): Use the basic memcached backend if nothing
- # else is supplied.
- self.driver = VALID_DOGPILE_BACKENDS['memcached'](arguments)
- else:
- if backend not in VALID_DOGPILE_BACKENDS:
- raise ValueError(
- _('Backend `%(backend)s` is not a valid memcached '
- 'backend. Valid backends: %(backend_list)s') %
- {'backend': backend,
- 'backend_list': ','.join(VALID_DOGPILE_BACKENDS.keys())})
- else:
- self.driver = VALID_DOGPILE_BACKENDS[backend](arguments)
-
- def __getattr__(self, name):
- """Forward calls to the underlying driver."""
- f = getattr(self.driver, name)
- setattr(self, name, f)
- return f
-
- def _get_set_arguments_driver_attr(self, exclude_expiry=False):
-
- # NOTE(morganfainberg): Shallow copy the .set_arguments dict to
- # ensure no changes cause the values to change in the instance
- # variable.
- set_arguments = getattr(self.driver, 'set_arguments', {}).copy()
-
- if exclude_expiry:
- # NOTE(morganfainberg): Explicitly strip out the 'time' key/value
- # from the set_arguments in the case that this key isn't meant
- # to expire
- set_arguments.pop('time', None)
- return set_arguments
-
- def set(self, key, value):
- mapping = {key: value}
- self.set_multi(mapping)
-
- def set_multi(self, mapping):
- mapping_keys = set(mapping.keys())
- no_expiry_keys = mapping_keys.intersection(self.no_expiry_hashed_keys)
- has_expiry_keys = mapping_keys.difference(self.no_expiry_hashed_keys)
-
- if no_expiry_keys:
- # NOTE(morganfainberg): For keys that have expiry excluded,
- # bypass the backend and directly call the client. Bypass directly
- # to the client is required as the 'set_arguments' are applied to
- # all ``set`` and ``set_multi`` calls by the driver, by calling
- # the client directly it is possible to exclude the ``time``
- # argument to the memcached server.
- new_mapping = {k: mapping[k] for k in no_expiry_keys}
- set_arguments = self._get_set_arguments_driver_attr(
- exclude_expiry=True)
- self.driver.client.set_multi(new_mapping, **set_arguments)
-
- if has_expiry_keys:
- new_mapping = {k: mapping[k] for k in has_expiry_keys}
- self.driver.set_multi(new_mapping)
-
- @classmethod
- def from_config_dict(cls, config_dict, prefix):
- prefix_len = len(prefix)
- return cls(
- {key[prefix_len:]: config_dict[key] for key in config_dict
- if key.startswith(prefix)})
-
- @property
- def key_mangler(self):
- if self._key_mangler is None:
- self._key_mangler = self.driver.key_mangler
- return self._key_mangler
-
- @key_mangler.setter
- def key_mangler(self, key_mangler):
- if callable(key_mangler):
- self._key_mangler = key_mangler
- self._rehash_keys()
- elif key_mangler is None:
- # NOTE(morganfainberg): Set the hashed key map to the unhashed
- # list since we no longer have a key_mangler.
- self._key_mangler = None
- self.no_expiry_hashed_keys = self.raw_no_expiry_keys
- else:
- raise TypeError(_('`key_mangler` functions must be callable.'))
-
- def _rehash_keys(self):
- no_expire = set()
- for key in self.raw_no_expiry_keys:
- no_expire.add(self._key_mangler(key))
- self.no_expiry_hashed_keys = no_expire
-
- def get_mutex(self, key):
- return MemcachedLock(lambda: self.driver.client, key,
- self.lock_timeout, self.max_lock_attempts)
diff --git a/keystone-moon/keystone/common/kvs/core.py b/keystone-moon/keystone/common/kvs/core.py
deleted file mode 100644
index 064825f8..00000000
--- a/keystone-moon/keystone/common/kvs/core.py
+++ /dev/null
@@ -1,450 +0,0 @@
-# Copyright 2013 Metacloud, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-import threading
-import time
-import weakref
-
-from dogpile.cache import api
-from dogpile.cache import proxy
-from dogpile.cache import region
-from dogpile.cache import util as dogpile_util
-from dogpile.core import nameregistry
-from oslo_config import cfg
-from oslo_log import log
-from oslo_utils import importutils
-from oslo_utils import reflection
-
-from keystone import exception
-from keystone.i18n import _
-from keystone.i18n import _LI
-from keystone.i18n import _LW
-
-
-__all__ = ('KeyValueStore', 'KeyValueStoreLock', 'LockTimeout',
- 'get_key_value_store')
-
-
-BACKENDS_REGISTERED = False
-CONF = cfg.CONF
-KEY_VALUE_STORE_REGISTRY = weakref.WeakValueDictionary()
-LOCK_WINDOW = 1
-LOG = log.getLogger(__name__)
-NO_VALUE = api.NO_VALUE
-
-
-def _register_backends():
- # NOTE(morganfainberg): This function exists to ensure we do not try and
- # register the backends prior to the configuration object being fully
- # available. We also need to ensure we do not register a given backend
- # more than one time. All backends will be prefixed with openstack.kvs
- # as the "short" name to reference them for configuration purposes. This
- # function is used in addition to the pre-registered backends in the
- # __init__ file for the KVS system.
- global BACKENDS_REGISTERED
-
- if not BACKENDS_REGISTERED:
- prefix = 'openstack.kvs.%s'
- for backend in CONF.kvs.backends:
- module, cls = backend.rsplit('.', 1)
- backend_name = prefix % cls
- LOG.debug(('Registering Dogpile Backend %(backend_path)s as '
- '%(backend_name)s'),
- {'backend_path': backend, 'backend_name': backend_name})
- region.register_backend(backend_name, module, cls)
- BACKENDS_REGISTERED = True
-
-
-def sha1_mangle_key(key):
- """Wrapper for dogpile's sha1_mangle_key.
-
- Taken from oslo_cache.core._sha1_mangle_key
-
- dogpile's sha1_mangle_key function expects an encoded string, so we
- should take steps to properly handle multiple inputs before passing
- the key through.
- """
- try:
- key = key.encode('utf-8', errors='xmlcharrefreplace')
- except (UnicodeError, AttributeError): # nosec
- # NOTE(stevemar): if encoding fails just continue anyway.
- pass
- return dogpile_util.sha1_mangle_key(key)
-
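A short usage sketch; the exact digest is deterministic but omitted here:

    mangled = sha1_mangle_key(u'user-token-12345')
    # 'mangled' is a 40-character SHA-1 hex digest, safe for use as a
    # memcache key regardless of whether the input was unicode or bytes.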
-
-class LockTimeout(exception.UnexpectedError):
- debug_message_format = _('Lock Timeout occurred for key, %(target)s')
-
-
-class KeyValueStore(object):
- """Basic KVS manager object to support Keystone Key-Value-Store systems.
-
- This manager also supports the concept of locking a given key resource to
- allow for a guaranteed atomic transaction to the backend.
- """
-
- def __init__(self, kvs_region):
- self.locking = True
- self._lock_timeout = 0
- self._region = kvs_region
- self._security_strategy = None
- self._secret_key = None
- self._lock_registry = nameregistry.NameRegistry(self._create_mutex)
-
- def configure(self, backing_store, key_mangler=None, proxy_list=None,
- locking=True, **region_config_args):
- """Configure the KeyValueStore instance.
-
- :param backing_store: dogpile.cache short name of the region backend
- :param key_mangler: key_mangler function
- :param proxy_list: list of proxy classes to apply to the region
- :param locking: boolean that allows disabling of locking mechanism for
- this instantiation
- :param region_config_args: key-word args passed to the dogpile.cache
- backend for configuration
- """
- if self.is_configured:
-            # NOTE(morganfainberg): It is a bad idea to reconfigure a backend;
-            # there are a lot of pitfalls and potential memory leaks that could
- # occur. By far the best approach is to re-create the KVS object
- # with the new configuration.
- raise RuntimeError(_('KVS region %s is already configured. '
- 'Cannot reconfigure.') % self._region.name)
-
- self.locking = locking
- self._lock_timeout = region_config_args.pop(
- 'lock_timeout', CONF.kvs.default_lock_timeout)
- self._configure_region(backing_store, **region_config_args)
- self._set_key_mangler(key_mangler)
- self._apply_region_proxy(proxy_list)
-
- @property
- def is_configured(self):
- return 'backend' in self._region.__dict__
-
- def _apply_region_proxy(self, proxy_list):
- if isinstance(proxy_list, list):
- proxies = []
-
- for item in proxy_list:
- if isinstance(item, str):
- LOG.debug('Importing class %s as KVS proxy.', item)
- pxy = importutils.import_class(item)
- else:
- pxy = item
-
- if issubclass(pxy, proxy.ProxyBackend):
- proxies.append(pxy)
- else:
- pxy_cls_name = reflection.get_class_name(
- pxy, fully_qualified=False)
- LOG.warning(_LW('%s is not a dogpile.proxy.ProxyBackend'),
- pxy_cls_name)
-
- for proxy_cls in reversed(proxies):
- proxy_cls_name = reflection.get_class_name(
- proxy_cls, fully_qualified=False)
- LOG.info(_LI('Adding proxy \'%(proxy)s\' to KVS %(name)s.'),
- {'proxy': proxy_cls_name,
- 'name': self._region.name})
- self._region.wrap(proxy_cls)
-
- def _assert_configured(self):
-        if 'backend' not in self._region.__dict__:
-            raise exception.UnexpectedError(_('Key Value Store not '
-                                              'configured: %s') %
-                                            self._region.name)
-
- def _set_keymangler_on_backend(self, key_mangler):
- try:
- self._region.backend.key_mangler = key_mangler
- except Exception as e:
- # NOTE(morganfainberg): The setting of the key_mangler on the
- # backend is used to allow the backend to
- # calculate a hashed key value as needed. Not all backends
- # require the ability to calculate hashed keys. If the
- # backend does not support/require this feature log a
- # debug line and move on otherwise raise the proper exception.
- # Support of the feature is implied by the existence of the
- # 'raw_no_expiry_keys' attribute.
- if not hasattr(self._region.backend, 'raw_no_expiry_keys'):
- LOG.debug(('Non-expiring keys not supported/required by '
- '%(region)s backend; unable to set '
- 'key_mangler for backend: %(err)s'),
- {'region': self._region.name, 'err': e})
- else:
- raise
-
- def _set_key_mangler(self, key_mangler):
- # Set the key_mangler that is appropriate for the given region being
- # configured here. The key_mangler function is called prior to storing
- # the value(s) in the backend. This is to help prevent collisions and
- # limit issues such as memcache's limited cache_key size.
- use_backend_key_mangler = getattr(self._region.backend,
- 'use_backend_key_mangler', False)
- if ((key_mangler is None or use_backend_key_mangler) and
- (self._region.backend.key_mangler is not None)):
- # NOTE(morganfainberg): Use the configured key_mangler as a first
- # choice. Second choice would be the key_mangler defined by the
- # backend itself. Finally, fall back to the defaults. The one
- # exception is if the backend defines `use_backend_key_mangler`
- # as True, which indicates the backend's key_mangler should be
- # the first choice.
- key_mangler = self._region.backend.key_mangler
-
- if CONF.kvs.enable_key_mangler:
- if key_mangler is not None:
- msg = _LI('Using %(func)s as KVS region %(name)s key_mangler')
- if callable(key_mangler):
- self._region.key_mangler = key_mangler
- LOG.info(msg, {'func': key_mangler.__name__,
- 'name': self._region.name})
- else:
- # NOTE(morganfainberg): We failed to set the key_mangler,
- # we should error out here to ensure we aren't causing
- # key-length or collision issues.
- raise exception.ValidationError(
- _('`key_mangler` option must be a function reference'))
- else:
- msg = _LI('Using default keystone.common.kvs.sha1_mangle_key '
- 'as KVS region %s key_mangler')
- LOG.info(msg, self._region.name)
- # NOTE(morganfainberg): Use 'default' keymangler to ensure
- # that unless explicitly changed, we mangle keys. This helps
- # to limit unintended cases of exceeding cache-key in backends
- # such as memcache.
- self._region.key_mangler = sha1_mangle_key
- self._set_keymangler_on_backend(self._region.key_mangler)
- else:
- LOG.info(_LI('KVS region %s key_mangler disabled.'),
- self._region.name)
- self._set_keymangler_on_backend(None)
-
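-    # For illustration: a key_mangler is simply a callable that maps a raw
-    # key to a backend-safe key. The default sha1_mangle_key used above
-    # behaves roughly like this sketch (assuming a hashlib import):
-    #
-    #     def sha1_mangle_key(key):
-    #         return hashlib.sha1(key).hexdigest()
-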
- def _configure_region(self, backend, **config_args):
- prefix = CONF.kvs.config_prefix
- conf_dict = {}
- conf_dict['%s.backend' % prefix] = backend
-
- if 'distributed_lock' not in config_args:
- config_args['distributed_lock'] = True
-
- config_args['lock_timeout'] = self._lock_timeout
-
- # NOTE(morganfainberg): To mitigate race conditions on comparing
- # the timeout and current time on the lock mutex, we are building
- # in a static 1 second overlap where the lock will still be valid
- # in the backend but not from the perspective of the context
-        # manager. Since we must develop to the lowest common denominator
-        # among the backends, and memcache's expiry resolution is no finer
-        # than 1 second, we must build in at least a 1 second overlap. A
-        # `lock_timeout` of 0 means locks never expire.
- if config_args['lock_timeout'] > 0:
- config_args['lock_timeout'] += LOCK_WINDOW
-
- for argument, value in config_args.items():
- arg_key = '.'.join([prefix, 'arguments', argument])
- conf_dict[arg_key] = value
-
- LOG.debug('KVS region configuration for %(name)s: %(config)r',
- {'name': self._region.name, 'config': conf_dict})
- self._region.configure_from_config(conf_dict, '%s.' % prefix)
-
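-    # Illustrative example (hypothetical values): with config_prefix
-    # 'keystone.kvs', backend 'openstack.kvs.Memcached' and a configured
-    # lock_timeout of 5, the conf_dict built above would be:
-    #
-    #     {'keystone.kvs.backend': 'openstack.kvs.Memcached',
-    #      'keystone.kvs.arguments.distributed_lock': True,
-    #      'keystone.kvs.arguments.lock_timeout': 6}   # 5 + LOCK_WINDOW
-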
- def _mutex(self, key):
- return self._lock_registry.get(key)
-
- def _create_mutex(self, key):
- mutex = self._region.backend.get_mutex(key)
- if mutex is not None:
- return mutex
- else:
- return self._LockWrapper(lock_timeout=self._lock_timeout)
-
- class _LockWrapper(object):
- """weakref-capable threading.Lock wrapper."""
-
- def __init__(self, lock_timeout):
- self.lock = threading.Lock()
- self.lock_timeout = lock_timeout
-
- def acquire(self, wait=True):
- return self.lock.acquire(wait)
-
- def release(self):
- self.lock.release()
-
- def get(self, key):
- """Get a single value from the KVS backend."""
- self._assert_configured()
- value = self._region.get(key)
- if value is NO_VALUE:
- raise exception.NotFound(target=key)
- return value
-
- def get_multi(self, keys):
- """Get multiple values in a single call from the KVS backend."""
- self._assert_configured()
- values = self._region.get_multi(keys)
- not_found = []
- for index, key in enumerate(keys):
- if values[index] is NO_VALUE:
- not_found.append(key)
- if not_found:
- # NOTE(morganfainberg): If any of the multi-get values are non-
- # existent, we should raise a NotFound error to mimic the .get()
- # method's behavior. In all cases the internal dogpile NO_VALUE
- # should be masked from the consumer of the KeyValueStore.
- raise exception.NotFound(target=not_found)
- return values
-
- def set(self, key, value, lock=None):
- """Set a single value in the KVS backend."""
- self._assert_configured()
- with self._action_with_lock(key, lock):
- self._region.set(key, value)
-
- def set_multi(self, mapping):
- """Set multiple key/value pairs in the KVS backend at once.
-
- Like delete_multi, this call does not serialize through the
- KeyValueStoreLock mechanism (locking cannot occur on more than one
- key in a given context without significant deadlock potential).
- """
- self._assert_configured()
- self._region.set_multi(mapping)
-
- def delete(self, key, lock=None):
- """Delete a single key from the KVS backend.
-
- This method will raise NotFound if the key doesn't exist. The get and
- delete are done in a single transaction (via KeyValueStoreLock
- mechanism).
- """
- self._assert_configured()
-
- with self._action_with_lock(key, lock):
- self.get(key)
- self._region.delete(key)
-
- def delete_multi(self, keys):
- """Delete multiple keys from the KVS backend in a single call.
-
- Like set_multi, this call does not serialize through the
- KeyValueStoreLock mechanism (locking cannot occur on more than one
- key in a given context without significant deadlock potential).
- """
- self._assert_configured()
- self._region.delete_multi(keys)
-
- def get_lock(self, key):
- """Get a write lock on the KVS value referenced by `key`.
-
- The ability to get a context manager to pass into the set/delete
-        methods allows for a single transaction to occur while guaranteeing the
- backing store will not change between the start of the 'lock' and the
- end. Lock timeout is fixed to the KeyValueStore configured lock
- timeout.
- """
- self._assert_configured()
- return KeyValueStoreLock(self._mutex(key), key, self.locking,
- self._lock_timeout)
-
- @contextlib.contextmanager
- def _action_with_lock(self, key, lock=None):
- """Wrapper context manager.
-
- Validates and handles the lock and lock timeout if passed in.
- """
- if not isinstance(lock, KeyValueStoreLock):
- # NOTE(morganfainberg): Locking only matters if a lock is passed in
- # to this method. If lock isn't a KeyValueStoreLock, treat this as
- # if no locking needs to occur.
- yield
- else:
- if not lock.key == key:
- raise ValueError(_('Lock key must match target key: %(lock)s '
- '!= %(target)s') %
- {'lock': lock.key, 'target': key})
- if not lock.active:
- raise exception.ValidationError(_('Must be called within an '
- 'active lock context.'))
- if not lock.expired:
- yield
- else:
- raise LockTimeout(target=key)
-
-
-class KeyValueStoreLock(object):
- """Basic KeyValueStoreLock context manager.
-
- Hooks into the dogpile.cache backend mutex allowing for distributed locking
- on resources. This is only a write lock, and will not prevent reads from
- occurring.
- """
-
- def __init__(self, mutex, key, locking_enabled=True, lock_timeout=0):
- self.mutex = mutex
- self.key = key
- self.enabled = locking_enabled
- self.lock_timeout = lock_timeout
- self.active = False
- self.acquire_time = 0
-
- def acquire(self):
- if self.enabled:
- self.mutex.acquire()
- LOG.debug('KVS lock acquired for: %s', self.key)
- self.active = True
- self.acquire_time = time.time()
- return self
-
- __enter__ = acquire
-
- @property
- def expired(self):
- if self.lock_timeout:
- calculated = time.time() - self.acquire_time + LOCK_WINDOW
- return calculated > self.lock_timeout
- else:
- return False
-
- def release(self):
- if self.enabled:
- self.mutex.release()
- if not self.expired:
- LOG.debug('KVS lock released for: %s', self.key)
- else:
- LOG.warning(_LW('KVS lock released (timeout reached) for: %s'),
- self.key)
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.release()
-
-
-def get_key_value_store(name, kvs_region=None):
- """Retrieve key value store.
-
- Instantiate a new :class:`.KeyValueStore` or return a previous
- instantiation that has the same name.
- """
- global KEY_VALUE_STORE_REGISTRY
-
- _register_backends()
- key_value_store = KEY_VALUE_STORE_REGISTRY.get(name)
- if key_value_store is None:
- if kvs_region is None:
- kvs_region = region.make_region(name=name)
- key_value_store = KeyValueStore(kvs_region)
- KEY_VALUE_STORE_REGISTRY[name] = key_value_store
- return key_value_store
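-
-# Minimal usage sketch (hypothetical store name and backend): configure a
-# store once, then perform locked single-key operations.
-#
-#     kvs = get_key_value_store('token-driver')
-#     if not kvs.is_configured:
-#         kvs.configure('openstack.kvs.Memcached')
-#     with kvs.get_lock('user-123') as lock:
-#         kvs.set('user-123', {'enabled': True}, lock=lock)
-#     value = kvs.get('user-123')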
diff --git a/keystone-moon/keystone/common/kvs/legacy.py b/keystone-moon/keystone/common/kvs/legacy.py
deleted file mode 100644
index 7e27d97f..00000000
--- a/keystone-moon/keystone/common/kvs/legacy.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import versionutils
-
-from keystone import exception
-
-
-class DictKvs(dict):
- def get(self, key, default=None):
- try:
- if isinstance(self[key], dict):
- return self[key].copy()
- else:
- return self[key][:]
- except KeyError:
- if default is not None:
- return default
- raise exception.NotFound(target=key)
-
- def set(self, key, value):
- if isinstance(value, dict):
- self[key] = value.copy()
- else:
- self[key] = value[:]
-
- def delete(self, key):
- """Deletes an item, returning True on success, False otherwise."""
- try:
- del self[key]
- except KeyError:
- raise exception.NotFound(target=key)
-
-
-INMEMDB = DictKvs()
-
-
-class Base(object):
- @versionutils.deprecated(versionutils.deprecated.ICEHOUSE,
- in_favor_of='keystone.common.kvs.KeyValueStore',
- remove_in=+2,
- what='keystone.common.kvs.Base')
- def __init__(self, db=None):
- if db is None:
- db = INMEMDB
- elif isinstance(db, DictKvs):
- db = db
- elif isinstance(db, dict):
- db = DictKvs(db)
- self.db = db
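-
-# Illustration of DictKvs copy semantics (names hypothetical): values are
-# copied on set and get, so callers cannot mutate the stored object in
-# place.
-#
-#     db = DictKvs()
-#     db.set('user', {'name': 'alice'})
-#     ref = db.get('user')
-#     ref['name'] = 'bob'                   # mutates the copy only
-#     assert db.get('user')['name'] == 'alice'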
diff --git a/keystone-moon/keystone/common/ldap/__init__.py b/keystone-moon/keystone/common/ldap/__init__.py
deleted file mode 100644
index ab5bf4d0..00000000
--- a/keystone-moon/keystone/common/ldap/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from keystone.common.ldap.core import * # noqa
diff --git a/keystone-moon/keystone/common/ldap/core.py b/keystone-moon/keystone/common/ldap/core.py
deleted file mode 100644
index d94aa04c..00000000
--- a/keystone-moon/keystone/common/ldap/core.py
+++ /dev/null
@@ -1,1955 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-import codecs
-import functools
-import os.path
-import re
-import sys
-import weakref
-
-import ldap.controls
-import ldap.filter
-import ldappool
-from oslo_log import log
-from oslo_utils import reflection
-import six
-from six.moves import map, zip
-
-from keystone.common import driver_hints
-from keystone import exception
-from keystone.i18n import _
-from keystone.i18n import _LW
-
-
-LOG = log.getLogger(__name__)
-
-LDAP_VALUES = {'TRUE': True, 'FALSE': False}
-CONTROL_TREEDELETE = '1.2.840.113556.1.4.805'
-LDAP_SCOPES = {'one': ldap.SCOPE_ONELEVEL,
- 'sub': ldap.SCOPE_SUBTREE}
-LDAP_DEREF = {'always': ldap.DEREF_ALWAYS,
- 'default': None,
- 'finding': ldap.DEREF_FINDING,
- 'never': ldap.DEREF_NEVER,
- 'searching': ldap.DEREF_SEARCHING}
-LDAP_TLS_CERTS = {'never': ldap.OPT_X_TLS_NEVER,
- 'demand': ldap.OPT_X_TLS_DEMAND,
- 'allow': ldap.OPT_X_TLS_ALLOW}
-
-
-# RFC 4511 (The LDAP Protocol) defines a list containing only the OID '1.1' to
-# indicate that no attributes should be returned besides the DN.
-DN_ONLY = ['1.1']
-
-_utf8_encoder = codecs.getencoder('utf-8')
-
-
-def utf8_encode(value):
- """Encode a basestring to UTF-8.
-
-    If the string is unicode, encode it to UTF-8; if the string is
-    str, assume it's already encoded. Otherwise raise a TypeError.
-
- :param value: A basestring
- :returns: UTF-8 encoded version of value
- :raises TypeError: If value is not basestring
- """
- if isinstance(value, six.text_type):
- return _utf8_encoder(value)[0]
- elif isinstance(value, six.binary_type):
- return value
- else:
- value_cls_name = reflection.get_class_name(
- value, fully_qualified=False)
- raise TypeError("value must be basestring, "
- "not %s" % value_cls_name)
-
-_utf8_decoder = codecs.getdecoder('utf-8')
-
-
-def utf8_decode(value):
- """Decode a from UTF-8 into unicode.
-
- If the value is a binary string assume it's UTF-8 encoded and decode
- it into a unicode string. Otherwise convert the value from its
- type into a unicode string.
-
- :param value: value to be returned as unicode
- :returns: value as unicode
- :raises UnicodeDecodeError: for invalid UTF-8 encoding
- """
- if isinstance(value, six.binary_type):
- return _utf8_decoder(value)[0]
- return six.text_type(value)
-
-
-def py2ldap(val):
- """Type convert a Python value to a type accepted by LDAP (unicode).
-
- The LDAP API only accepts strings for values therefore convert
- the value's type to a unicode string. A subsequent type conversion
- will encode the unicode as UTF-8 as required by the python-ldap API,
- but for now we just want a string representation of the value.
-
- :param val: The value to convert to a LDAP string representation
- :returns: unicode string representation of value.
- """
- if isinstance(val, bool):
- return u'TRUE' if val else u'FALSE'
- else:
- return six.text_type(val)
-
-
-def enabled2py(val):
- """Similar to ldap2py, only useful for the enabled attribute."""
- try:
- return LDAP_VALUES[val]
- except KeyError: # nosec
- # It wasn't a boolean value, will try as an int instead.
- pass
- try:
- return int(val)
- except ValueError: # nosec
- # It wasn't an int either, will try as utf8 instead.
- pass
- return utf8_decode(val)
-
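-# Conversion examples (illustrative):
-#
-#     py2ldap(True)       -> u'TRUE'
-#     py2ldap(42)         -> u'42'
-#     enabled2py('TRUE')  -> True
-#     enabled2py('512')   -> 512
-#     enabled2py('admin') -> u'admin'
-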
-
-def ldap2py(val):
- """Convert an LDAP formatted value to Python type used by OpenStack.
-
- Virtually all LDAP values are stored as UTF-8 encoded strings.
- OpenStack prefers values which are unicode friendly.
-
- :param val: LDAP formatted value
- :returns: val converted to preferred Python type
- """
- return utf8_decode(val)
-
-
-def convert_ldap_result(ldap_result):
- """Convert LDAP search result to Python types used by OpenStack.
-
- Each result tuple is of the form (dn, attrs), where dn is a string
- containing the DN (distinguished name) of the entry, and attrs is
- a dictionary containing the attributes associated with the
- entry. The keys of attrs are strings, and the associated values
- are lists of strings.
-
- OpenStack wants to use Python types of its choosing. Strings will
-    be unicode, truth values boolean, whole numbers ints, etc. DNs will
-    also be decoded from UTF-8 to unicode.
-
- :param ldap_result: LDAP search result
- :returns: list of 2-tuples containing (dn, attrs) where dn is unicode
- and attrs is a dict whose values are type converted to
- OpenStack preferred types.
- """
- py_result = []
- at_least_one_referral = False
- for dn, attrs in ldap_result:
- ldap_attrs = {}
- if dn is None:
- # this is a Referral object, rather than an Entry object
- at_least_one_referral = True
- continue
-
- for kind, values in attrs.items():
- try:
- val2py = enabled2py if kind == 'enabled' else ldap2py
- ldap_attrs[kind] = [val2py(x) for x in values]
- except UnicodeDecodeError:
- LOG.debug('Unable to decode value for attribute %s', kind)
-
- py_result.append((utf8_decode(dn), ldap_attrs))
- if at_least_one_referral:
- LOG.debug(('Referrals were returned and ignored. Enable referral '
- 'chasing in keystone.conf via [ldap] chase_referrals'))
-
- return py_result
-
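-# Example (illustrative values): a python-ldap result such as
-#
-#     [('cn=alice,ou=users,dc=example,dc=com',
-#       {'enabled': ['TRUE'], 'sn': ['Alice']})]
-#
-# converts to
-#
-#     [(u'cn=alice,ou=users,dc=example,dc=com',
-#       {'enabled': [True], 'sn': [u'Alice']})]
-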
-
-def safe_iter(attrs):
- if attrs is None:
- return
- elif isinstance(attrs, list):
- for e in attrs:
- yield e
- else:
- yield attrs
-
-
-def parse_deref(opt):
- try:
- return LDAP_DEREF[opt]
- except KeyError:
- raise ValueError(_('Invalid LDAP deref option: %(option)s. '
- 'Choose one of: %(options)s') %
- {'option': opt,
- 'options': ', '.join(LDAP_DEREF.keys()), })
-
-
-def parse_tls_cert(opt):
- try:
- return LDAP_TLS_CERTS[opt]
- except KeyError:
- raise ValueError(_(
- 'Invalid LDAP TLS certs option: %(option)s. '
- 'Choose one of: %(options)s') % {
- 'option': opt,
- 'options': ', '.join(LDAP_TLS_CERTS.keys())})
-
-
-def ldap_scope(scope):
- try:
- return LDAP_SCOPES[scope]
- except KeyError:
- raise ValueError(
- _('Invalid LDAP scope: %(scope)s. Choose one of: %(options)s') % {
- 'scope': scope,
- 'options': ', '.join(LDAP_SCOPES.keys())})
-
-
-def prep_case_insensitive(value):
- """Prepare a string for case-insensitive comparison.
-
- This is defined in RFC4518. For simplicity, all this function does is
- lowercase all the characters, strip leading and trailing whitespace,
- and compress sequences of spaces to a single space.
- """
- value = re.sub(r'\s+', ' ', value.strip().lower())
- return value
-
-
-def is_ava_value_equal(attribute_type, val1, val2):
- """Returns True if and only if the AVAs are equal.
-
- When comparing AVAs, the equality matching rule for the attribute type
- should be taken into consideration. For simplicity, this implementation
- does a case-insensitive comparison.
-
-    Note that this function uses prep_case_insensitive so the limitations of
- that function apply here.
-
- """
- return prep_case_insensitive(val1) == prep_case_insensitive(val2)
-
-
-def is_rdn_equal(rdn1, rdn2):
- """Returns True if and only if the RDNs are equal.
-
- * RDNs must have the same number of AVAs.
- * Each AVA of the RDNs must be the equal for the same attribute type. The
- order isn't significant. Note that an attribute type will only be in one
- AVA in an RDN, otherwise the DN wouldn't be valid.
-    * Attribute types aren't case sensitive. Note that attribute type
-      comparison is more complicated than implemented here. This function
-      only compares case-insensitively. The code should handle multiple
-      names for an attribute type (e.g., cn, commonName, and 2.5.4.3 are
-      the same).
-
- Note that this function uses is_ava_value_equal to compare AVAs so the
- limitations of that function apply here.
-
- """
- if len(rdn1) != len(rdn2):
- return False
-
- for attr_type_1, val1, dummy in rdn1:
- found = False
- for attr_type_2, val2, dummy in rdn2:
- if attr_type_1.lower() != attr_type_2.lower():
- continue
-
- found = True
- if not is_ava_value_equal(attr_type_1, val1, val2):
- return False
- break
- if not found:
- return False
-
- return True
-
-
-def is_dn_equal(dn1, dn2):
- """Returns True if and only if the DNs are equal.
-
- Two DNs are equal if they've got the same number of RDNs and if the RDNs
- are the same at each position. See RFC4517.
-
- Note that this function uses is_rdn_equal to compare RDNs so the
- limitations of that function apply here.
-
- :param dn1: Either a string DN or a DN parsed by ldap.dn.str2dn.
- :param dn2: Either a string DN or a DN parsed by ldap.dn.str2dn.
-
- """
- if not isinstance(dn1, list):
- dn1 = ldap.dn.str2dn(utf8_encode(dn1))
- if not isinstance(dn2, list):
- dn2 = ldap.dn.str2dn(utf8_encode(dn2))
-
- if len(dn1) != len(dn2):
- return False
-
- for rdn1, rdn2 in zip(dn1, dn2):
- if not is_rdn_equal(rdn1, rdn2):
- return False
- return True
-
-
-def dn_startswith(descendant_dn, dn):
- """Returns True if and only if the descendant_dn is under the dn.
-
- :param descendant_dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
- :param dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
-
- """
- if not isinstance(descendant_dn, list):
- descendant_dn = ldap.dn.str2dn(utf8_encode(descendant_dn))
- if not isinstance(dn, list):
- dn = ldap.dn.str2dn(utf8_encode(dn))
-
- if len(descendant_dn) <= len(dn):
- return False
-
- # Use the last len(dn) RDNs.
- return is_dn_equal(descendant_dn[-len(dn):], dn)
-
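-# Illustrative checks (hypothetical DNs):
-#
-#     is_dn_equal('cn=Alice,ou=users,dc=example,dc=com',
-#                 'CN=alice,OU=users,DC=example,DC=com')     # True
-#     dn_startswith('cn=alice,ou=users,dc=example,dc=com',
-#                   'ou=users,dc=example,dc=com')            # True
-#     dn_startswith('ou=users,dc=example,dc=com',
-#                   'ou=users,dc=example,dc=com')            # False (equal)
-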
-
-@six.add_metaclass(abc.ABCMeta)
-class LDAPHandler(object):
- """Abstract class which defines methods for a LDAP API provider.
-
- Native Keystone values cannot be passed directly into and from the
- python-ldap API. Type conversion must occur at the LDAP API
-    boundary; examples of type conversions are:
-
- * booleans map to the strings 'TRUE' and 'FALSE'
-
- * integer values map to their string representation.
-
- * unicode strings are encoded in UTF-8
-
- In addition to handling type conversions at the API boundary we
- have the requirement to support more than one LDAP API
- provider. Currently we have:
-
- * python-ldap, this is the standard LDAP API for Python, it
- requires access to a live LDAP server.
-
- * Fake LDAP which emulates python-ldap. This is used for
- testing without requiring a live LDAP server.
-
- To support these requirements we need a layer that performs type
- conversions and then calls another LDAP API which is configurable
- (e.g. either python-ldap or the fake emulation).
-
- We have an additional constraint at the time of this writing due to
- limitations in the logging module. The logging module is not
- capable of accepting UTF-8 encoded strings, it will throw an
- encoding exception. Therefore all logging MUST be performed prior
- to UTF-8 conversion. This means no logging can be performed in the
- ldap APIs that implement the python-ldap API because those APIs
- are defined to accept only UTF-8 strings. Thus the layer which
- performs type conversions must also do the logging. We do the type
- conversions in two steps, once to convert all Python types to
- unicode strings, then log, then convert the unicode strings to
- UTF-8.
-
- There are a variety of ways one could accomplish this, we elect to
- use a chaining technique whereby instances of this class simply
- call the next member in the chain via the "conn" attribute. The
- chain is constructed by passing in an existing instance of this
- class as the conn attribute when the class is instantiated.
-
- Here is a brief explanation of why other possible approaches were
- not used:
-
- subclassing
-
- To perform the wrapping operations in the correct order
-        the type conversion class would have to subclass each of
- the API providers. This is awkward, doubles the number of
- classes, and does not scale well. It requires the type
- conversion class to be aware of all possible API
- providers.
-
- decorators
-
- Decorators provide an elegant solution to wrap methods and
- would be an ideal way to perform type conversions before
- calling the wrapped function and then converting the
-      values returned from the wrapped function. However, a
-      decorator needs to be aware of the method signature: it
-      has to know what input parameters need conversion and how
- to convert the result. For an API like python-ldap which
- has a large number of different method signatures it would
- require a large number of specialized
- decorators. Experience has shown it's very easy to apply
- the wrong decorator due to the inherent complexity and
- tendency to cut-n-paste code. Another option is to
- parameterize the decorator to make it "smart". Experience
- has shown such decorators become insanely complicated and
-      difficult to understand and debug. Also, decorators tend to
-      hide what's really going on when a method is called: the
-      operations being performed are not visible when looking at
-      the implementation of a decorated method, which experience
-      has shown also leads to mistakes.
-
-    Chaining simplifies both the wrapping performed for type conversion
-    and the substitution of alternative API providers. One simply
-    creates a new instance of the API interface and inserts it at the
-    front of the chain. Type conversions are explicit and obvious.
-
- If a new method needs to be added to the API interface one adds it
- to the abstract class definition. Should one miss adding the new
- method to any derivations of the abstract class the code will fail
- to load and run making it impossible to forget updating all the
- derived classes.
-
- """
-
- @abc.abstractmethod
- def __init__(self, conn=None):
- self.conn = conn
-
- @abc.abstractmethod
- def connect(self, url, page_size=0, alias_dereferencing=None,
- use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
- tls_req_cert='demand', chase_referrals=None, debug_level=None,
- use_pool=None, pool_size=None, pool_retry_max=None,
- pool_retry_delay=None, pool_conn_timeout=None,
- pool_conn_lifetime=None):
- raise exception.NotImplemented() # pragma: no cover
-
- @abc.abstractmethod
- def set_option(self, option, invalue):
- raise exception.NotImplemented() # pragma: no cover
-
- @abc.abstractmethod
- def get_option(self, option):
- raise exception.NotImplemented() # pragma: no cover
-
- @abc.abstractmethod
- def simple_bind_s(self, who='', cred='',
- serverctrls=None, clientctrls=None):
- raise exception.NotImplemented() # pragma: no cover
-
- @abc.abstractmethod
- def unbind_s(self):
- raise exception.NotImplemented() # pragma: no cover
-
- @abc.abstractmethod
- def add_s(self, dn, modlist):
- raise exception.NotImplemented() # pragma: no cover
-
- @abc.abstractmethod
- def search_s(self, base, scope,
- filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
- raise exception.NotImplemented() # pragma: no cover
-
- @abc.abstractmethod
- def search_ext(self, base, scope,
- filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
- serverctrls=None, clientctrls=None,
- timeout=-1, sizelimit=0):
- raise exception.NotImplemented() # pragma: no cover
-
- @abc.abstractmethod
- def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
- resp_ctrl_classes=None):
- raise exception.NotImplemented() # pragma: no cover
-
- @abc.abstractmethod
- def modify_s(self, dn, modlist):
- raise exception.NotImplemented() # pragma: no cover
-
- @abc.abstractmethod
- def delete_s(self, dn):
- raise exception.NotImplemented() # pragma: no cover
-
- @abc.abstractmethod
- def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
- raise exception.NotImplemented() # pragma: no cover
-
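-# How the chain is assembled in practice (sketch; see BaseLdap.get_connection
-# below): the type-converting handler wraps a provider handler via its conn
-# attribute, so calls flow KeystoneLDAPHandler -> provider -> LDAP server.
-#
-#     provider = PythonLDAPHandler()        # or PooledLDAPHandler()
-#     handler = KeystoneLDAPHandler(conn=provider)
-#     handler.connect(url)                  # delegates down the chain
-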
-
-class PythonLDAPHandler(LDAPHandler):
- """LDAPHandler implementation which calls the python-ldap API.
-
- Note, the python-ldap API requires all string values to be UTF-8 encoded.
- The KeystoneLDAPHandler enforces this prior to invoking the methods in this
- class.
-
- """
-
- def __init__(self, conn=None):
- super(PythonLDAPHandler, self).__init__(conn=conn)
-
- def connect(self, url, page_size=0, alias_dereferencing=None,
- use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
- tls_req_cert='demand', chase_referrals=None, debug_level=None,
- use_pool=None, pool_size=None, pool_retry_max=None,
- pool_retry_delay=None, pool_conn_timeout=None,
- pool_conn_lifetime=None):
-
- _common_ldap_initialization(url=url,
- use_tls=use_tls,
- tls_cacertfile=tls_cacertfile,
- tls_cacertdir=tls_cacertdir,
- tls_req_cert=tls_req_cert,
- debug_level=debug_level)
-
- self.conn = ldap.initialize(url)
- self.conn.protocol_version = ldap.VERSION3
-
- if alias_dereferencing is not None:
- self.conn.set_option(ldap.OPT_DEREF, alias_dereferencing)
- self.page_size = page_size
-
- if use_tls:
- self.conn.start_tls_s()
-
- if chase_referrals is not None:
- self.conn.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
-
- def set_option(self, option, invalue):
- return self.conn.set_option(option, invalue)
-
- def get_option(self, option):
- return self.conn.get_option(option)
-
- def simple_bind_s(self, who='', cred='',
- serverctrls=None, clientctrls=None):
- return self.conn.simple_bind_s(who, cred, serverctrls, clientctrls)
-
- def unbind_s(self):
- return self.conn.unbind_s()
-
- def add_s(self, dn, modlist):
- return self.conn.add_s(dn, modlist)
-
- def search_s(self, base, scope,
- filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
- return self.conn.search_s(base, scope, filterstr,
- attrlist, attrsonly)
-
- def search_ext(self, base, scope,
- filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
- serverctrls=None, clientctrls=None,
- timeout=-1, sizelimit=0):
- return self.conn.search_ext(base, scope,
- filterstr, attrlist, attrsonly,
- serverctrls, clientctrls,
- timeout, sizelimit)
-
- def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
- resp_ctrl_classes=None):
- # The resp_ctrl_classes parameter is a recent addition to the
- # API. It defaults to None. We do not anticipate using it.
- # To run with older versions of python-ldap we do not pass it.
- return self.conn.result3(msgid, all, timeout)
-
- def modify_s(self, dn, modlist):
- return self.conn.modify_s(dn, modlist)
-
- def delete_s(self, dn):
- return self.conn.delete_s(dn)
-
- def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
- return self.conn.delete_ext_s(dn, serverctrls, clientctrls)
-
-
-def _common_ldap_initialization(url, use_tls=False, tls_cacertfile=None,
- tls_cacertdir=None, tls_req_cert=None,
- debug_level=None):
- """LDAP initialization for PythonLDAPHandler and PooledLDAPHandler."""
- LOG.debug("LDAP init: url=%s", url)
- LOG.debug('LDAP init: use_tls=%s tls_cacertfile=%s tls_cacertdir=%s '
- 'tls_req_cert=%s tls_avail=%s',
- use_tls, tls_cacertfile, tls_cacertdir,
- tls_req_cert, ldap.TLS_AVAIL)
-
- if debug_level is not None:
- ldap.set_option(ldap.OPT_DEBUG_LEVEL, debug_level)
-
- using_ldaps = url.lower().startswith("ldaps")
-
- if use_tls and using_ldaps:
- raise AssertionError(_('Invalid TLS / LDAPS combination'))
-
- # The certificate trust options apply for both LDAPS and TLS.
- if use_tls or using_ldaps:
- if not ldap.TLS_AVAIL:
- raise ValueError(_('Invalid LDAP TLS_AVAIL option: %s. TLS '
- 'not available') % ldap.TLS_AVAIL)
- if tls_cacertfile:
- # NOTE(topol)
- # python ldap TLS does not verify CACERTFILE or CACERTDIR
- # so we add some extra simple sanity check verification
- # Also, setting these values globally (i.e. on the ldap object)
- # works but these values are ignored when setting them on the
- # connection
- if not os.path.isfile(tls_cacertfile):
- raise IOError(_("tls_cacertfile %s not found "
- "or is not a file") %
- tls_cacertfile)
- ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile)
- elif tls_cacertdir:
- # NOTE(topol)
- # python ldap TLS does not verify CACERTFILE or CACERTDIR
- # so we add some extra simple sanity check verification
- # Also, setting these values globally (i.e. on the ldap object)
- # works but these values are ignored when setting them on the
- # connection
- if not os.path.isdir(tls_cacertdir):
- raise IOError(_("tls_cacertdir %s not found "
- "or is not a directory") %
- tls_cacertdir)
- ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
- if tls_req_cert in list(LDAP_TLS_CERTS.values()):
- ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
- else:
- LOG.debug("LDAP TLS: invalid TLS_REQUIRE_CERT Option=%s",
- tls_req_cert)
-
-
-class MsgId(list):
- """Wrapper class to hold connection and msgid."""
-
- pass
-
-
-def use_conn_pool(func):
- """Use this only for connection pool specific ldap API.
-
- This adds connection object to decorated API as next argument after self.
-
- """
- def wrapper(self, *args, **kwargs):
- # assert isinstance(self, PooledLDAPHandler)
- with self._get_pool_connection() as conn:
- self._apply_options(conn)
- return func(self, conn, *args, **kwargs)
- return wrapper
-
-
-class PooledLDAPHandler(LDAPHandler):
- """LDAPHandler implementation which uses pooled connection manager.
-
-    Pool-specific configuration is defined in the [ldap] section, which
-    also supplies all other LDAP configuration.
-
-    Keystone's LDAP authentication logic authenticates an end user via an
-    LDAP bind with the user's DN and password to establish that the
-    supplied password is correct. This can fill up the pool quickly (as
-    the pool re-uses existing connections based on their bind data) and
-    would leave no space in the pool for connection re-use by other LDAP
-    operations. A separate pool can therefore be established for those
-    requests when the related flag 'use_auth_pool' is enabled. That pool
-    can have its own size and connection lifetime; other pool attributes
-    are shared between the two pools. If 'use_pool' is disabled, then
-    'use_auth_pool' does not matter. If 'use_auth_pool' is not enabled,
-    then connection pooling is not used for those LDAP operations.
-
- Note, the python-ldap API requires all string values to be UTF-8
- encoded. The KeystoneLDAPHandler enforces this prior to invoking
- the methods in this class.
-
- """
-
- # Added here to allow override for testing
- Connector = ldappool.StateConnector
- auth_pool_prefix = 'auth_pool_'
-
- connection_pools = {} # static connector pool dict
-
- def __init__(self, conn=None, use_auth_pool=False):
- super(PooledLDAPHandler, self).__init__(conn=conn)
- self.who = ''
- self.cred = ''
- self.conn_options = {} # connection specific options
- self.page_size = None
- self.use_auth_pool = use_auth_pool
- self.conn_pool = None
-
- def connect(self, url, page_size=0, alias_dereferencing=None,
- use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
- tls_req_cert='demand', chase_referrals=None, debug_level=None,
- use_pool=None, pool_size=None, pool_retry_max=None,
- pool_retry_delay=None, pool_conn_timeout=None,
- pool_conn_lifetime=None):
-
- _common_ldap_initialization(url=url,
- use_tls=use_tls,
- tls_cacertfile=tls_cacertfile,
- tls_cacertdir=tls_cacertdir,
- tls_req_cert=tls_req_cert,
- debug_level=debug_level)
-
- self.page_size = page_size
-
-        # The following two options are not set in the common initialization
-        # because they must follow the sequence used in PythonLDAPHandler.
- if alias_dereferencing is not None:
- self.set_option(ldap.OPT_DEREF, alias_dereferencing)
- if chase_referrals is not None:
- self.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
-
- if self.use_auth_pool: # separate pool when use_auth_pool enabled
- pool_url = self.auth_pool_prefix + url
- else:
- pool_url = url
- try:
- self.conn_pool = self.connection_pools[pool_url]
- except KeyError:
- self.conn_pool = ldappool.ConnectionManager(
- url,
- size=pool_size,
- retry_max=pool_retry_max,
- retry_delay=pool_retry_delay,
- timeout=pool_conn_timeout,
- connector_cls=self.Connector,
- use_tls=use_tls,
- max_lifetime=pool_conn_lifetime)
- self.connection_pools[pool_url] = self.conn_pool
-
- def set_option(self, option, invalue):
- self.conn_options[option] = invalue
-
- def get_option(self, option):
- value = self.conn_options.get(option)
-        # If the option was not specified explicitly, then use the
-        # connection's default value for that option, if there is one.
- if value is None:
- with self._get_pool_connection() as conn:
- value = conn.get_option(option)
- return value
-
- def _apply_options(self, conn):
- # if connection has a lifetime, then it already has options specified
- if conn.get_lifetime() > 30:
- return
- for option, invalue in self.conn_options.items():
- conn.set_option(option, invalue)
-
- def _get_pool_connection(self):
- return self.conn_pool.connection(self.who, self.cred)
-
- def simple_bind_s(self, who='', cred='',
- serverctrls=None, clientctrls=None):
- # Not using use_conn_pool decorator here as this API takes cred as
- # input.
- self.who = who
- self.cred = cred
- with self._get_pool_connection() as conn:
- self._apply_options(conn)
-
- def unbind_s(self):
-        # Once the `with` statement's execution block completes, the
-        # connection is always released via a finally block in ldappool,
-        # so this unbind is a no-op.
- pass
-
- @use_conn_pool
- def add_s(self, conn, dn, modlist):
- return conn.add_s(dn, modlist)
-
- @use_conn_pool
- def search_s(self, conn, base, scope,
- filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
- return conn.search_s(base, scope, filterstr, attrlist,
- attrsonly)
-
- def search_ext(self, base, scope,
- filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
- serverctrls=None, clientctrls=None,
- timeout=-1, sizelimit=0):
- """Asynchronous API to return a ``MsgId`` instance.
-
- The ``MsgId`` instance can be safely used in a call to ``result3()``.
-
- To work with ``result3()`` API in predictable manner, the same LDAP
- connection is needed which originally provided the ``msgid``. So, this
- method wraps the existing connection and ``msgid`` in a new ``MsgId``
- instance. The connection associated with ``search_ext`` is released
-        once the last hard reference to the ``MsgId`` instance is freed.
-
- """
- conn_ctxt = self._get_pool_connection()
- conn = conn_ctxt.__enter__()
- try:
- msgid = conn.search_ext(base, scope,
- filterstr, attrlist, attrsonly,
- serverctrls, clientctrls,
- timeout, sizelimit)
- except Exception:
- conn_ctxt.__exit__(*sys.exc_info())
- raise
- res = MsgId((conn, msgid))
- weakref.ref(res, functools.partial(conn_ctxt.__exit__,
- None, None, None))
- return res
-
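-    # Typical asynchronous usage (sketch): the pooled connection stays
-    # checked out until the returned MsgId is garbage collected.
-    #
-    #     msgid = handler.search_ext(base, scope, filterstr)
-    #     rtype, rdata, rmsgid, ctrls = handler.result3(msgid)
-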
- def result3(self, msgid, all=1, timeout=None,
- resp_ctrl_classes=None):
- """This method is used to wait for and return result.
-
- This method returns the result of an operation previously initiated by
- one of the LDAP asynchronous operation routines (eg search_ext()). It
- returned an invocation identifier (a message id) upon successful
- initiation of their operation.
-
- Input msgid is expected to be instance of class MsgId which has LDAP
- session/connection used to execute search_ext and message idenfier.
-
- The connection associated with search_ext is released once last hard
- reference to MsgId object is freed. This will happen when function
- which requested msgId and used it in result3 exits.
-
- """
- conn, msg_id = msgid
- return conn.result3(msg_id, all, timeout)
-
- @use_conn_pool
- def modify_s(self, conn, dn, modlist):
- return conn.modify_s(dn, modlist)
-
- @use_conn_pool
- def delete_s(self, conn, dn):
- return conn.delete_s(dn)
-
- @use_conn_pool
- def delete_ext_s(self, conn, dn, serverctrls=None, clientctrls=None):
- return conn.delete_ext_s(dn, serverctrls, clientctrls)
-
-
-class KeystoneLDAPHandler(LDAPHandler):
- """Convert data types and perform logging.
-
-    This LDAP interface wraps the python-ldap based interfaces. The
-    python-ldap interfaces require string values encoded in UTF-8, but
-    the OpenStack logging framework at the time of this writing is not
-    capable of accepting strings encoded in UTF-8; the log functions
-    will throw decoding errors if a non-ascii character appears in a
-    string.
-
-    Prior to the call, Python data types are converted to a string
- representation as required by the LDAP APIs.
-
- Then logging is performed so we can track what is being
- sent/received from LDAP. Also the logging filters security
- sensitive items (i.e. passwords).
-
- Then the string values are encoded into UTF-8.
-
- Then the LDAP API entry point is invoked.
-
- Data returned from the LDAP call is converted back from UTF-8
- encoded strings into the Python data type used internally in
- OpenStack.
-
- """
-
- def __init__(self, conn=None):
- super(KeystoneLDAPHandler, self).__init__(conn=conn)
- self.page_size = 0
-
- def __enter__(self):
- return self
-
- def _disable_paging(self):
- # Disable the pagination from now on
- self.page_size = 0
-
- def connect(self, url, page_size=0, alias_dereferencing=None,
- use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
- tls_req_cert='demand', chase_referrals=None, debug_level=None,
- use_pool=None, pool_size=None,
- pool_retry_max=None, pool_retry_delay=None,
- pool_conn_timeout=None, pool_conn_lifetime=None):
- self.page_size = page_size
- return self.conn.connect(url, page_size, alias_dereferencing,
- use_tls, tls_cacertfile, tls_cacertdir,
- tls_req_cert, chase_referrals,
- debug_level=debug_level,
- use_pool=use_pool,
- pool_size=pool_size,
- pool_retry_max=pool_retry_max,
- pool_retry_delay=pool_retry_delay,
- pool_conn_timeout=pool_conn_timeout,
- pool_conn_lifetime=pool_conn_lifetime)
-
- def set_option(self, option, invalue):
- return self.conn.set_option(option, invalue)
-
- def get_option(self, option):
- return self.conn.get_option(option)
-
- def simple_bind_s(self, who='', cred='',
- serverctrls=None, clientctrls=None):
- LOG.debug("LDAP bind: who=%s", who)
- who_utf8 = utf8_encode(who)
- cred_utf8 = utf8_encode(cred)
- return self.conn.simple_bind_s(who_utf8, cred_utf8,
- serverctrls=serverctrls,
- clientctrls=clientctrls)
-
- def unbind_s(self):
- LOG.debug("LDAP unbind")
- return self.conn.unbind_s()
-
- def add_s(self, dn, modlist):
- ldap_attrs = [(kind, [py2ldap(x) for x in safe_iter(values)])
- for kind, values in modlist]
- logging_attrs = [(kind, values
- if kind != 'userPassword'
- else ['****'])
- for kind, values in ldap_attrs]
- LOG.debug('LDAP add: dn=%s attrs=%s',
- dn, logging_attrs)
- dn_utf8 = utf8_encode(dn)
- ldap_attrs_utf8 = [(kind, [utf8_encode(x) for x in safe_iter(values)])
- for kind, values in ldap_attrs]
- return self.conn.add_s(dn_utf8, ldap_attrs_utf8)
-
- def search_s(self, base, scope,
- filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
- # NOTE(morganfainberg): Remove "None" singletons from this list, which
- # allows us to set mapped attributes to "None" as defaults in config.
- # Without this filtering, the ldap query would raise a TypeError since
- # attrlist is expected to be an iterable of strings.
- if attrlist is not None:
- attrlist = [attr for attr in attrlist if attr is not None]
- LOG.debug('LDAP search: base=%s scope=%s filterstr=%s '
- 'attrs=%s attrsonly=%s',
- base, scope, filterstr, attrlist, attrsonly)
- if self.page_size:
- ldap_result = self._paged_search_s(base, scope,
- filterstr, attrlist)
- else:
- base_utf8 = utf8_encode(base)
- filterstr_utf8 = utf8_encode(filterstr)
- if attrlist is None:
- attrlist_utf8 = None
- else:
- attrlist_utf8 = list(map(utf8_encode, attrlist))
- ldap_result = self.conn.search_s(base_utf8, scope,
- filterstr_utf8,
- attrlist_utf8, attrsonly)
-
- py_result = convert_ldap_result(ldap_result)
-
- return py_result
-
- def search_ext(self, base, scope,
- filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
- serverctrls=None, clientctrls=None,
- timeout=-1, sizelimit=0):
- if attrlist is not None:
- attrlist = [attr for attr in attrlist if attr is not None]
- LOG.debug('LDAP search_ext: base=%s scope=%s filterstr=%s '
- 'attrs=%s attrsonly=%s '
- 'serverctrls=%s clientctrls=%s timeout=%s sizelimit=%s',
- base, scope, filterstr, attrlist, attrsonly,
- serverctrls, clientctrls, timeout, sizelimit)
- return self.conn.search_ext(base, scope,
- filterstr, attrlist, attrsonly,
- serverctrls, clientctrls,
- timeout, sizelimit)
-
- def _paged_search_s(self, base, scope, filterstr, attrlist=None):
- res = []
- use_old_paging_api = False
- # The API for the simple paged results control changed between
- # python-ldap 2.3 and 2.4. We need to detect the capabilities
- # of the python-ldap version we are using.
- if hasattr(ldap, 'LDAP_CONTROL_PAGE_OID'):
- use_old_paging_api = True
- lc = ldap.controls.SimplePagedResultsControl(
- controlType=ldap.LDAP_CONTROL_PAGE_OID,
- criticality=True,
- controlValue=(self.page_size, ''))
- page_ctrl_oid = ldap.LDAP_CONTROL_PAGE_OID
- else:
- lc = ldap.controls.libldap.SimplePagedResultsControl(
- criticality=True,
- size=self.page_size,
- cookie='')
- page_ctrl_oid = ldap.controls.SimplePagedResultsControl.controlType
-
- base_utf8 = utf8_encode(base)
- filterstr_utf8 = utf8_encode(filterstr)
- if attrlist is None:
- attrlist_utf8 = None
- else:
- attrlist = [attr for attr in attrlist if attr is not None]
- attrlist_utf8 = list(map(utf8_encode, attrlist))
- msgid = self.conn.search_ext(base_utf8,
- scope,
- filterstr_utf8,
- attrlist_utf8,
- serverctrls=[lc])
-        # Request pages from the LDAP server until it has no more data
- while True:
- # Request to the ldap server a page with 'page_size' entries
- rtype, rdata, rmsgid, serverctrls = self.conn.result3(msgid)
- # Receive the data
- res.extend(rdata)
- pctrls = [c for c in serverctrls
- if c.controlType == page_ctrl_oid]
- if pctrls:
- # LDAP server supports pagination
- if use_old_paging_api:
- est, cookie = pctrls[0].controlValue
- lc.controlValue = (self.page_size, cookie)
- else:
- cookie = lc.cookie = pctrls[0].cookie
-
- if cookie:
- # There is more data still on the server
- # so we request another page
- msgid = self.conn.search_ext(base_utf8,
- scope,
- filterstr_utf8,
- attrlist_utf8,
- serverctrls=[lc])
- else:
- # Exit condition no more data on server
- break
- else:
- LOG.warning(_LW('LDAP Server does not support paging. '
- 'Disable paging in keystone.conf to '
- 'avoid this message.'))
- self._disable_paging()
- break
- return res
-
- def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
- resp_ctrl_classes=None):
- ldap_result = self.conn.result3(msgid, all, timeout, resp_ctrl_classes)
-
- LOG.debug('LDAP result3: msgid=%s all=%s timeout=%s '
- 'resp_ctrl_classes=%s ldap_result=%s',
- msgid, all, timeout, resp_ctrl_classes, ldap_result)
-
-        # ldap_result returned from result3 is a tuple of
-        # (rtype, rdata, rmsgid, serverctrls). We don't need any of these
-        # except rdata.
- rtype, rdata, rmsgid, serverctrls = ldap_result
- py_result = convert_ldap_result(rdata)
- return py_result
-
- def modify_s(self, dn, modlist):
- ldap_modlist = [
- (op, kind, (None if values is None
- else [py2ldap(x) for x in safe_iter(values)]))
- for op, kind, values in modlist]
-
- logging_modlist = [(op, kind, (values if kind != 'userPassword'
- else ['****']))
- for op, kind, values in ldap_modlist]
- LOG.debug('LDAP modify: dn=%s modlist=%s',
- dn, logging_modlist)
-
- dn_utf8 = utf8_encode(dn)
- ldap_modlist_utf8 = [
- (op, kind, (None if values is None
- else [utf8_encode(x) for x in safe_iter(values)]))
- for op, kind, values in ldap_modlist]
- return self.conn.modify_s(dn_utf8, ldap_modlist_utf8)
-
- def delete_s(self, dn):
- LOG.debug("LDAP delete: dn=%s", dn)
- dn_utf8 = utf8_encode(dn)
- return self.conn.delete_s(dn_utf8)
-
- def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
- LOG.debug('LDAP delete_ext: dn=%s serverctrls=%s clientctrls=%s',
- dn, serverctrls, clientctrls)
- dn_utf8 = utf8_encode(dn)
- return self.conn.delete_ext_s(dn_utf8, serverctrls, clientctrls)
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.unbind_s()
-
-
-_HANDLERS = {}
-
-
-def register_handler(prefix, handler):
- _HANDLERS[prefix] = handler
-
-
-def _get_connection(conn_url, use_pool=False, use_auth_pool=False):
- for prefix, handler in _HANDLERS.items():
- if conn_url.startswith(prefix):
- return handler()
-
- if use_pool:
- return PooledLDAPHandler(use_auth_pool=use_auth_pool)
- else:
- return PythonLDAPHandler()
-
-
-def filter_entity(entity_ref):
- """Filter out private items in an entity dict.
-
- :param entity_ref: the entity dictionary. The 'dn' field will be removed.
- 'dn' is used in LDAP, but should not be returned to the user. This
- value may be modified.
-
- :returns: entity_ref
-
- """
- if entity_ref:
- entity_ref.pop('dn', None)
- return entity_ref
-
-
-class BaseLdap(object):
- DEFAULT_OU = None
- DEFAULT_STRUCTURAL_CLASSES = None
- DEFAULT_ID_ATTR = 'cn'
- DEFAULT_OBJECTCLASS = None
- DEFAULT_FILTER = None
- DEFAULT_EXTRA_ATTR_MAPPING = []
- DUMB_MEMBER_DN = 'cn=dumb,dc=nonexistent'
- NotFound = None
- notfound_arg = None
- options_name = None
- model = None
- attribute_options_names = {}
- immutable_attrs = []
- attribute_ignore = []
- tree_dn = None
-
- def __init__(self, conf):
- self.LDAP_URL = conf.ldap.url
- self.LDAP_USER = conf.ldap.user
- self.LDAP_PASSWORD = conf.ldap.password
- self.LDAP_SCOPE = ldap_scope(conf.ldap.query_scope)
- self.alias_dereferencing = parse_deref(conf.ldap.alias_dereferencing)
- self.page_size = conf.ldap.page_size
- self.use_tls = conf.ldap.use_tls
- self.tls_cacertfile = conf.ldap.tls_cacertfile
- self.tls_cacertdir = conf.ldap.tls_cacertdir
- self.tls_req_cert = parse_tls_cert(conf.ldap.tls_req_cert)
- self.attribute_mapping = {}
- self.chase_referrals = conf.ldap.chase_referrals
- self.debug_level = conf.ldap.debug_level
-
- # LDAP Pool specific attribute
- self.use_pool = conf.ldap.use_pool
- self.pool_size = conf.ldap.pool_size
- self.pool_retry_max = conf.ldap.pool_retry_max
- self.pool_retry_delay = conf.ldap.pool_retry_delay
- self.pool_conn_timeout = conf.ldap.pool_connection_timeout
- self.pool_conn_lifetime = conf.ldap.pool_connection_lifetime
-
- # End user authentication pool specific config attributes
- self.use_auth_pool = self.use_pool and conf.ldap.use_auth_pool
- self.auth_pool_size = conf.ldap.auth_pool_size
- self.auth_pool_conn_lifetime = conf.ldap.auth_pool_connection_lifetime
-
- if self.options_name is not None:
- self.suffix = conf.ldap.suffix
- dn = '%s_tree_dn' % self.options_name
- self.tree_dn = (getattr(conf.ldap, dn)
- or '%s,%s' % (self.DEFAULT_OU, self.suffix))
-
- idatt = '%s_id_attribute' % self.options_name
- self.id_attr = getattr(conf.ldap, idatt) or self.DEFAULT_ID_ATTR
-
- objclass = '%s_objectclass' % self.options_name
- self.object_class = (getattr(conf.ldap, objclass)
- or self.DEFAULT_OBJECTCLASS)
-
- for k, v in self.attribute_options_names.items():
- v = '%s_%s_attribute' % (self.options_name, v)
- self.attribute_mapping[k] = getattr(conf.ldap, v)
-
- attr_mapping_opt = ('%s_additional_attribute_mapping' %
- self.options_name)
- attr_mapping = (getattr(conf.ldap, attr_mapping_opt)
- or self.DEFAULT_EXTRA_ATTR_MAPPING)
- self.extra_attr_mapping = self._parse_extra_attrs(attr_mapping)
-
- ldap_filter = '%s_filter' % self.options_name
- self.ldap_filter = getattr(conf.ldap,
- ldap_filter) or self.DEFAULT_FILTER
-
- allow_create = '%s_allow_create' % self.options_name
- self.allow_create = getattr(conf.ldap, allow_create)
-
- allow_update = '%s_allow_update' % self.options_name
- self.allow_update = getattr(conf.ldap, allow_update)
-
- allow_delete = '%s_allow_delete' % self.options_name
- self.allow_delete = getattr(conf.ldap, allow_delete)
-
- member_attribute = '%s_member_attribute' % self.options_name
- self.member_attribute = getattr(conf.ldap, member_attribute, None)
-
- self.structural_classes = self.DEFAULT_STRUCTURAL_CLASSES
-
- if self.notfound_arg is None:
- self.notfound_arg = self.options_name + '_id'
-
- attribute_ignore = '%s_attribute_ignore' % self.options_name
- self.attribute_ignore = getattr(conf.ldap, attribute_ignore)
-
- self.use_dumb_member = conf.ldap.use_dumb_member
- self.dumb_member = (conf.ldap.dumb_member or
- self.DUMB_MEMBER_DN)
-
- self.subtree_delete_enabled = conf.ldap.allow_subtree_delete
-
- def _not_found(self, object_id):
- if self.NotFound is None:
- return exception.NotFound(target=object_id)
- else:
- return self.NotFound(**{self.notfound_arg: object_id})
-
- def _parse_extra_attrs(self, option_list):
- mapping = {}
- for item in option_list:
- try:
- ldap_attr, attr_map = item.split(':')
- except Exception:
- LOG.warning(_LW(
- 'Invalid additional attribute mapping: "%s". '
- 'Format must be <ldap_attribute>:<keystone_attribute>'),
- item)
- continue
- mapping[ldap_attr] = attr_map
- return mapping
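-
-    # Example (hypothetical mapping): ['description:notes'] yields
-    # {'description': 'notes'}, exposing the LDAP 'description' attribute
-    # as the keystone 'notes' attribute.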
-
- def _is_dumb_member(self, member_dn):
- """Checks that member is a dumb member.
-
- :param member_dn: DN of member to be checked.
- """
- return (self.use_dumb_member
- and is_dn_equal(member_dn, self.dumb_member))
-
- def get_connection(self, user=None, password=None, end_user_auth=False):
- use_pool = self.use_pool
- pool_size = self.pool_size
- pool_conn_lifetime = self.pool_conn_lifetime
-
- if end_user_auth:
- if not self.use_auth_pool:
- use_pool = False
- else:
- pool_size = self.auth_pool_size
- pool_conn_lifetime = self.auth_pool_conn_lifetime
-
- conn = _get_connection(self.LDAP_URL, use_pool,
- use_auth_pool=end_user_auth)
-
- conn = KeystoneLDAPHandler(conn=conn)
-
- conn.connect(self.LDAP_URL,
- page_size=self.page_size,
- alias_dereferencing=self.alias_dereferencing,
- use_tls=self.use_tls,
- tls_cacertfile=self.tls_cacertfile,
- tls_cacertdir=self.tls_cacertdir,
- tls_req_cert=self.tls_req_cert,
- chase_referrals=self.chase_referrals,
- debug_level=self.debug_level,
- use_pool=use_pool,
- pool_size=pool_size,
- pool_retry_max=self.pool_retry_max,
- pool_retry_delay=self.pool_retry_delay,
- pool_conn_timeout=self.pool_conn_timeout,
- pool_conn_lifetime=pool_conn_lifetime
- )
-
- if user is None:
- user = self.LDAP_USER
-
- if password is None:
- password = self.LDAP_PASSWORD
-
- # not all LDAP servers require authentication, so we don't bind
- # if we don't have any user/pass
- if user and password:
- conn.simple_bind_s(user, password)
-
- return conn
-
- def _id_to_dn_string(self, object_id):
- return u'%s=%s,%s' % (self.id_attr,
- ldap.dn.escape_dn_chars(
- six.text_type(object_id)),
- self.tree_dn)
-
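-    # Example (hypothetical config): with id_attr 'cn' and tree_dn
-    # 'ou=Users,dc=example,dc=com', _id_to_dn_string('alice') returns
-    # u'cn=alice,ou=Users,dc=example,dc=com'.
-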
- def _id_to_dn(self, object_id):
- if self.LDAP_SCOPE == ldap.SCOPE_ONELEVEL:
- return self._id_to_dn_string(object_id)
- with self.get_connection() as conn:
- search_result = conn.search_s(
- self.tree_dn, self.LDAP_SCOPE,
- u'(&(%(id_attr)s=%(id)s)(objectclass=%(objclass)s))' %
- {'id_attr': self.id_attr,
- 'id': ldap.filter.escape_filter_chars(
- six.text_type(object_id)),
- 'objclass': self.object_class},
- attrlist=DN_ONLY)
- if search_result:
- dn, attrs = search_result[0]
- return dn
- else:
- return self._id_to_dn_string(object_id)
-
- @staticmethod
- def _dn_to_id(dn):
- return utf8_decode(ldap.dn.str2dn(utf8_encode(dn))[0][0][1])
-
- def _ldap_res_to_model(self, res):
- # LDAP attribute names may be returned in a different case than
- # they are defined in the mapping, so we need to check for keys
- # in a case-insensitive way. We use the case specified in the
- # mapping for the model to ensure we have a predictable way of
- # retrieving values later.
- lower_res = {k.lower(): v for k, v in res[1].items()}
-
- id_attrs = lower_res.get(self.id_attr.lower())
- if not id_attrs:
- message = _('ID attribute %(id_attr)s not found in LDAP '
- 'object %(dn)s') % ({'id_attr': self.id_attr,
- 'dn': res[0]})
- raise exception.NotFound(message=message)
- if len(id_attrs) > 1:
- # FIXME(gyee): if this is a multi-value attribute and it has
- # multiple values, we can't use it as ID. Retain the dn_to_id
- # logic here so it does not potentially break existing
- # deployments. We need to fix our read-write LDAP logic so
- # it does not get the ID from DN.
- message = _LW('ID attribute %(id_attr)s for LDAP object %(dn)s '
- 'has multiple values and therefore cannot be used '
- 'as an ID. Will get the ID from DN instead') % (
- {'id_attr': self.id_attr,
- 'dn': res[0]})
- LOG.warning(message)
- id_val = self._dn_to_id(res[0])
- else:
- id_val = id_attrs[0]
- obj = self.model(id=id_val)
-
- for k in obj.known_keys:
- if k in self.attribute_ignore:
- continue
-
- try:
- map_attr = self.attribute_mapping.get(k, k)
- if map_attr is None:
- # Ignore attributes that are mapped to None.
- continue
-
- v = lower_res[map_attr.lower()]
- except KeyError: # nosec
- # Didn't find the attr, so don't add it.
- pass
- else:
- try:
- obj[k] = v[0]
- except IndexError:
- obj[k] = None
-
- return obj
-
- def check_allow_create(self):
- if not self.allow_create:
- action = _('LDAP %s create') % self.options_name
- raise exception.ForbiddenAction(action=action)
-
- def check_allow_update(self):
- if not self.allow_update:
- action = _('LDAP %s update') % self.options_name
- raise exception.ForbiddenAction(action=action)
-
- def check_allow_delete(self):
- if not self.allow_delete:
- action = _('LDAP %s delete') % self.options_name
- raise exception.ForbiddenAction(action=action)
-
- def affirm_unique(self, values):
- if values.get('name') is not None:
- try:
- self.get_by_name(values['name'])
- except exception.NotFound: # nosec
- # Didn't find it so it's unique, good.
- pass
- else:
- raise exception.Conflict(type=self.options_name,
- details=_('Duplicate name, %s.') %
- values['name'])
-
- if values.get('id') is not None:
- try:
- self.get(values['id'])
- except exception.NotFound: # nosec
- # Didn't find it, so it's unique, good.
- pass
- else:
- raise exception.Conflict(type=self.options_name,
- details=_('Duplicate ID, %s.') %
- values['id'])
-
- def create(self, values):
- self.affirm_unique(values)
- object_classes = self.structural_classes + [self.object_class]
- attrs = [('objectClass', object_classes)]
- for k, v in values.items():
- if k in self.attribute_ignore:
- continue
- if k == 'id':
- # no need to check if v is None as 'id' will always have
- # a value
- attrs.append((self.id_attr, [v]))
- elif v is not None:
- attr_type = self.attribute_mapping.get(k, k)
- if attr_type is not None:
- attrs.append((attr_type, [v]))
- extra_attrs = [attr for attr, name
- in self.extra_attr_mapping.items()
- if name == k]
- for attr in extra_attrs:
- attrs.append((attr, [v]))
-
- if 'groupOfNames' in object_classes and self.use_dumb_member:
- attrs.append(('member', [self.dumb_member]))
- with self.get_connection() as conn:
- conn.add_s(self._id_to_dn(values['id']), attrs)
- return values
-
- def _ldap_get(self, object_id, ldap_filter=None):
- query = (u'(&(%(id_attr)s=%(id)s)'
- u'%(filter)s'
- u'(objectClass=%(object_class)s))'
- % {'id_attr': self.id_attr,
- 'id': ldap.filter.escape_filter_chars(
- six.text_type(object_id)),
- 'filter': (ldap_filter or self.ldap_filter or ''),
- 'object_class': self.object_class})
- with self.get_connection() as conn:
- try:
- attrs = list(set(([self.id_attr] +
- list(self.attribute_mapping.values()) +
- list(self.extra_attr_mapping.keys()))))
- res = conn.search_s(self.tree_dn,
- self.LDAP_SCOPE,
- query,
- attrs)
- except ldap.NO_SUCH_OBJECT:
- return None
- try:
- return res[0]
- except IndexError:
- return None
-
- def _ldap_get_limited(self, base, scope, filterstr, attrlist, sizelimit):
- with self.get_connection() as conn:
- try:
- control = ldap.controls.libldap.SimplePagedResultsControl(
- criticality=True,
- size=sizelimit,
- cookie='')
- msgid = conn.search_ext(base, scope, filterstr, attrlist,
- serverctrls=[control])
- rdata = conn.result3(msgid)
- return rdata
- except ldap.NO_SUCH_OBJECT:
- return []
-
- @driver_hints.truncated
- def _ldap_get_all(self, hints, ldap_filter=None):
- query = u'(&%s(objectClass=%s)(%s=*))' % (
- ldap_filter or self.ldap_filter or '',
- self.object_class,
- self.id_attr)
- sizelimit = 0
- attrs = list(set(([self.id_attr] +
- list(self.attribute_mapping.values()) +
- list(self.extra_attr_mapping.keys()))))
- if hints.limit:
- sizelimit = hints.limit['limit']
- return self._ldap_get_limited(self.tree_dn,
- self.LDAP_SCOPE,
- query,
- attrs,
- sizelimit)
- with self.get_connection() as conn:
- try:
- return conn.search_s(self.tree_dn,
- self.LDAP_SCOPE,
- query,
- attrs)
- except ldap.NO_SUCH_OBJECT:
- return []
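    # Editor's note (illustration): with object_class='inetOrgPerson' and
    # id_attr='cn' (hypothetical values) and no extra filter, _ldap_get_all()
    # issues the query u'(&(objectClass=inetOrgPerson)(cn=*))'. When the
    # hints carry a limit it delegates to _ldap_get_limited() above, whose
    # RFC 2696 simple paged results control asks the server to cap the
    # number of entries returned.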
-
- def _ldap_get_list(self, search_base, scope, query_params=None,
- attrlist=None):
- query = u'(objectClass=%s)' % self.object_class
- if query_params:
-
- def calc_filter(attrname, value):
- val_esc = ldap.filter.escape_filter_chars(value)
- return '(%s=%s)' % (attrname, val_esc)
-
- query = (u'(&%s%s)' %
- (query, ''.join([calc_filter(k, v) for k, v in
- query_params.items()])))
- with self.get_connection() as conn:
- return conn.search_s(search_base, scope, query, attrlist)
-
- def get(self, object_id, ldap_filter=None):
- res = self._ldap_get(object_id, ldap_filter)
- if res is None:
- raise self._not_found(object_id)
- else:
- return self._ldap_res_to_model(res)
-
- def get_by_name(self, name, ldap_filter=None):
- query = (u'(%s=%s)' % (self.attribute_mapping['name'],
- ldap.filter.escape_filter_chars(
- six.text_type(name))))
- res = self.get_all(query)
- try:
- return res[0]
- except IndexError:
- raise self._not_found(name)
-
- def get_all(self, ldap_filter=None, hints=None):
- hints = hints or driver_hints.Hints()
- return [self._ldap_res_to_model(x)
- for x in self._ldap_get_all(hints, ldap_filter)]
-
- def update(self, object_id, values, old_obj=None):
- if old_obj is None:
- old_obj = self.get(object_id)
-
- modlist = []
- for k, v in values.items():
- if k == 'id':
- # id can't be modified.
- continue
-
- if k in self.attribute_ignore:
-
- # Handle 'enabled' specially since can't disable if ignored.
- if k == 'enabled' and (not v):
- action = _("Disabling an entity where the 'enable' "
- "attribute is ignored by configuration.")
- raise exception.ForbiddenAction(action=action)
-
- continue
-
- # attribute value has not changed
- if k in old_obj and old_obj[k] == v:
- continue
-
- if k in self.immutable_attrs:
- msg = (_("Cannot change %(option_name)s %(attr)s") %
- {'option_name': self.options_name, 'attr': k})
- raise exception.ValidationError(msg)
-
- if v is None:
- if old_obj.get(k) is not None:
- modlist.append((ldap.MOD_DELETE,
- self.attribute_mapping.get(k, k),
- None))
- continue
-
- current_value = old_obj.get(k)
- if current_value is None:
- op = ldap.MOD_ADD
- modlist.append((op, self.attribute_mapping.get(k, k), [v]))
- elif current_value != v:
- op = ldap.MOD_REPLACE
- modlist.append((op, self.attribute_mapping.get(k, k), [v]))
-
- if modlist:
- with self.get_connection() as conn:
- try:
- conn.modify_s(self._id_to_dn(object_id), modlist)
- except ldap.NO_SUCH_OBJECT:
- raise self._not_found(object_id)
-
- return self.get(object_id)
-
- def delete(self, object_id):
- with self.get_connection() as conn:
- try:
- conn.delete_s(self._id_to_dn(object_id))
- except ldap.NO_SUCH_OBJECT:
- raise self._not_found(object_id)
-
- def delete_tree(self, object_id):
- tree_delete_control = ldap.controls.LDAPControl(CONTROL_TREEDELETE,
- 0,
- None)
- with self.get_connection() as conn:
- try:
- conn.delete_ext_s(self._id_to_dn(object_id),
- serverctrls=[tree_delete_control])
- except ldap.NO_SUCH_OBJECT:
- raise self._not_found(object_id)
- except ldap.NOT_ALLOWED_ON_NONLEAF:
- # Most LDAP servers do not support the tree_delete_control.
- # In these servers, the usual idiom is to first perform a
- # search to get the entries to delete, then delete them in
- # in order of child to parent, since LDAP forbids the
- # deletion of a parent entry before deleting the children
- # of that parent. The simplest way to do that is to delete
- # the entries in order of the length of the DN, from longest
- # to shortest DN.
- dn = self._id_to_dn(object_id)
- scope = ldap.SCOPE_SUBTREE
- # With some directory servers, an entry with objectclass
- # ldapsubentry will not be returned unless it is explicitly
- # requested, by specifying the objectclass in the search
- # filter. We must specify this, with objectclass=*, in an
- # LDAP filter OR clause, in order to return all entries
- filt = '(|(objectclass=*)(objectclass=ldapsubentry))'
- # We only need the DNs of the entries. Since no attributes
- # will be returned, we do not have to specify attrsonly=1.
- entries = conn.search_s(dn, scope, filt, attrlist=DN_ONLY)
- if entries:
- for dn in sorted((e[0] for e in entries),
- key=len, reverse=True):
- conn.delete_s(dn)
- else:
- LOG.debug('No entries in LDAP subtree %s', dn)
-
- def add_member(self, member_dn, member_list_dn):
- """Add member to the member list.
-
- :param member_dn: DN of member to be added.
- :param member_list_dn: DN of group to which the
- member will be added.
-
- :raises keystone.exception.Conflict: If the user was already a member.
- :raises self.NotFound: If the group entry didn't exist.
- """
- with self.get_connection() as conn:
- try:
- mod = (ldap.MOD_ADD, self.member_attribute, member_dn)
- conn.modify_s(member_list_dn, [mod])
- except ldap.TYPE_OR_VALUE_EXISTS:
- raise exception.Conflict(_('Member %(member)s '
- 'is already a member'
- ' of group %(group)s') % {
- 'member': member_dn,
- 'group': member_list_dn})
- except ldap.NO_SUCH_OBJECT:
- raise self._not_found(member_list_dn)
-
- def remove_member(self, member_dn, member_list_dn):
- """Remove member from the member list.
-
- :param member_dn: DN of member to be removed.
- :param member_list_dn: DN of group from which the
- member will be removed.
-
- :raises self.NotFound: If the group entry didn't exist.
- :raises ldap.NO_SUCH_ATTRIBUTE: If the user wasn't a member.
- """
- with self.get_connection() as conn:
- try:
- mod = (ldap.MOD_DELETE, self.member_attribute, member_dn)
- conn.modify_s(member_list_dn, [mod])
- except ldap.NO_SUCH_OBJECT:
- raise self._not_found(member_list_dn)
-
- def _delete_tree_nodes(self, search_base, scope, query_params=None):
- query = u'(objectClass=%s)' % self.object_class
- if query_params:
- query = (u'(&%s%s)' %
- (query, ''.join(['(%s=%s)'
- % (k, ldap.filter.escape_filter_chars(v))
- for k, v in
- query_params.items()])))
- not_deleted_nodes = []
- with self.get_connection() as conn:
- try:
- nodes = conn.search_s(search_base, scope, query,
- attrlist=DN_ONLY)
- except ldap.NO_SUCH_OBJECT:
- LOG.debug('Could not find entry with dn=%s', search_base)
- raise self._not_found(self._dn_to_id(search_base))
- else:
- for node_dn, _t in nodes:
- try:
- conn.delete_s(node_dn)
- except ldap.NO_SUCH_OBJECT:
- not_deleted_nodes.append(node_dn)
-
- if not_deleted_nodes:
- LOG.warning(_LW("When deleting entries for %(search_base)s, "
- "could not delete nonexistent entries "
- "%(entries)s%(dots)s"),
- {'search_base': search_base,
- 'entries': not_deleted_nodes[:3],
- 'dots': '...' if len(not_deleted_nodes) > 3 else ''})
-
- def filter_query(self, hints, query=None):
- """Applies filtering to a query.
-
- :param hints: contains the list of filters, which may be None,
- indicating that there are no filters to be applied.
- If it's not None, then any filters satisfied here will be
- removed so that the caller will know if any filters
- remain to be applied.
- :param query: LDAP query into which to include filters
-
-        :returns: LDAP query, updated with any filters satisfied
-
- """
- def build_filter(filter_, hints):
- """Build a filter for the query.
-
- :param filter_: the dict that describes this filter
- :param hints: contains the list of filters yet to be satisfied.
-
-            :returns: LDAP query term to be added
-
- """
- ldap_attr = self.attribute_mapping[filter_['name']]
- val_esc = ldap.filter.escape_filter_chars(filter_['value'])
-
- if filter_['case_sensitive']:
- # NOTE(henry-nash): Although dependent on the schema being
- # used, most LDAP attributes are configured with case
- # insensitive matching rules, so we'll leave this to the
- # controller to filter.
- return
-
- if filter_['name'] == 'enabled':
- # NOTE(henry-nash): Due to the different options for storing
-                # the enabled attribute (e.g. emulated or not), for now we
-                # don't try to filter this at the driver level - we simply
-                # leave the filter to be handled by the controller. It seems
-                # unlikely that this will cause a significant performance
-                # issue.
- return
-
- # TODO(henry-nash): Currently there are no booleans (other than
- # 'enabled' that is handled above) on which you can filter. If
- # there were, we would need to add special handling here to
- # convert the booleans values to 'TRUE' and 'FALSE'. To do that
- # we would also need to know which filter keys were actually
- # booleans (this is related to bug #1411478).
-
- if filter_['comparator'] == 'equals':
- query_term = (u'(%(attr)s=%(val)s)'
- % {'attr': ldap_attr, 'val': val_esc})
- elif filter_['comparator'] == 'contains':
- query_term = (u'(%(attr)s=*%(val)s*)'
- % {'attr': ldap_attr, 'val': val_esc})
- elif filter_['comparator'] == 'startswith':
- query_term = (u'(%(attr)s=%(val)s*)'
- % {'attr': ldap_attr, 'val': val_esc})
- elif filter_['comparator'] == 'endswith':
- query_term = (u'(%(attr)s=*%(val)s)'
- % {'attr': ldap_attr, 'val': val_esc})
- else:
- # It's a filter we don't understand, so let the caller
- # work out if they need to do something with it.
- return
-
- return query_term
-
- if query is None:
- # make sure query is a string so the ldap filter is properly
- # constructed from filter_list later
- query = ''
-
- if hints is None:
- return query
-
- filter_list = []
- satisfied_filters = []
-
- for filter_ in hints.filters:
- if filter_['name'] not in self.attribute_mapping:
- continue
- new_filter = build_filter(filter_, hints)
- if new_filter is not None:
- filter_list.append(new_filter)
- satisfied_filters.append(filter_)
-
- if filter_list:
- query = u'(&%s%s)' % (query, ''.join(filter_list))
-
-        # Remove the satisfied filters so the caller knows which remain.
- for filter_ in satisfied_filters:
- hints.filters.remove(filter_)
-
- return query
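    # Editor's note (illustration): given attribute_mapping={'name': 'cn'}
    # and a single hint {'name': 'name', 'value': 'demo', 'comparator':
    # 'contains', 'case_sensitive': False}, filter_query() returns
    # u'(&(cn=*demo*))' and removes the satisfied filter from hints.filters,
    # so the caller can tell that no filters remain to be applied.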
-
-
-class EnabledEmuMixIn(BaseLdap):
- """Emulates boolean 'enabled' attribute if turned on.
-
- Creates a group holding all enabled objects of this class, all missing
- objects are considered disabled.
-
- Options:
-
- * $name_enabled_emulation - boolean, on/off
- * $name_enabled_emulation_dn - DN of that group, default is
- cn=enabled_${name}s,${tree_dn}
- * $name_enabled_emulation_use_group_config - boolean, on/off
-
- Where ${name}s is the plural of self.options_name ('users' or 'tenants'),
- ${tree_dn} is self.tree_dn.
- """
-
- DEFAULT_GROUP_OBJECTCLASS = 'groupOfNames'
- DEFAULT_MEMBER_ATTRIBUTE = 'member'
-
- def __init__(self, conf):
- super(EnabledEmuMixIn, self).__init__(conf)
- enabled_emulation = '%s_enabled_emulation' % self.options_name
- self.enabled_emulation = getattr(conf.ldap, enabled_emulation)
-
- enabled_emulation_dn = '%s_enabled_emulation_dn' % self.options_name
- self.enabled_emulation_dn = getattr(conf.ldap, enabled_emulation_dn)
-
- use_group_config = ('%s_enabled_emulation_use_group_config' %
- self.options_name)
- self.use_group_config = getattr(conf.ldap, use_group_config)
-
- if not self.use_group_config:
- self.member_attribute = self.DEFAULT_MEMBER_ATTRIBUTE
- self.group_objectclass = self.DEFAULT_GROUP_OBJECTCLASS
- else:
- self.member_attribute = conf.ldap.group_member_attribute
- self.group_objectclass = conf.ldap.group_objectclass
-
- if not self.enabled_emulation_dn:
- naming_attr_name = 'cn'
- naming_attr_value = 'enabled_%ss' % self.options_name
- sub_vals = (naming_attr_name, naming_attr_value, self.tree_dn)
- self.enabled_emulation_dn = '%s=%s,%s' % sub_vals
- naming_attr = (naming_attr_name, [naming_attr_value])
- else:
- # Extract the attribute name and value from the configured DN.
- naming_dn = ldap.dn.str2dn(utf8_encode(self.enabled_emulation_dn))
- naming_rdn = naming_dn[0][0]
- naming_attr = (utf8_decode(naming_rdn[0]),
- utf8_decode(naming_rdn[1]))
- self.enabled_emulation_naming_attr = naming_attr
-
- def _get_enabled(self, object_id, conn):
- dn = self._id_to_dn(object_id)
- query = '(%s=%s)' % (self.member_attribute,
- ldap.filter.escape_filter_chars(dn))
- try:
- enabled_value = conn.search_s(self.enabled_emulation_dn,
- ldap.SCOPE_BASE,
- query, attrlist=DN_ONLY)
- except ldap.NO_SUCH_OBJECT:
- return False
- else:
- return bool(enabled_value)
-
- def _add_enabled(self, object_id):
- with self.get_connection() as conn:
- if not self._get_enabled(object_id, conn):
- modlist = [(ldap.MOD_ADD,
- self.member_attribute,
- [self._id_to_dn(object_id)])]
- try:
- conn.modify_s(self.enabled_emulation_dn, modlist)
- except ldap.NO_SUCH_OBJECT:
- attr_list = [('objectClass', [self.group_objectclass]),
- (self.member_attribute,
- [self._id_to_dn(object_id)]),
- self.enabled_emulation_naming_attr]
- if self.use_dumb_member:
- attr_list[1][1].append(self.dumb_member)
- conn.add_s(self.enabled_emulation_dn, attr_list)
-
- def _remove_enabled(self, object_id):
- modlist = [(ldap.MOD_DELETE,
- self.member_attribute,
- [self._id_to_dn(object_id)])]
- with self.get_connection() as conn:
- try:
- conn.modify_s(self.enabled_emulation_dn, modlist)
- except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE): # nosec
- # It's already gone, good.
- pass
-
- def create(self, values):
- if self.enabled_emulation:
- enabled_value = values.pop('enabled', True)
- ref = super(EnabledEmuMixIn, self).create(values)
- if 'enabled' not in self.attribute_ignore:
- if enabled_value:
- self._add_enabled(ref['id'])
- ref['enabled'] = enabled_value
- return ref
- else:
- return super(EnabledEmuMixIn, self).create(values)
-
- def get(self, object_id, ldap_filter=None):
- with self.get_connection() as conn:
- ref = super(EnabledEmuMixIn, self).get(object_id, ldap_filter)
- if ('enabled' not in self.attribute_ignore and
- self.enabled_emulation):
- ref['enabled'] = self._get_enabled(object_id, conn)
- return ref
-
- def get_all(self, ldap_filter=None, hints=None):
- hints = hints or driver_hints.Hints()
- if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
-            # We have to duplicate BaseLdap.get_all here so that we can
-            # filter out the enabled-emulation group entry by DN.
- tenant_list = [self._ldap_res_to_model(x)
- for x in self._ldap_get_all(hints, ldap_filter)
- if x[0] != self.enabled_emulation_dn]
- with self.get_connection() as conn:
- for tenant_ref in tenant_list:
- tenant_ref['enabled'] = self._get_enabled(
- tenant_ref['id'], conn)
- return tenant_list
- else:
- return super(EnabledEmuMixIn, self).get_all(ldap_filter, hints)
-
- def update(self, object_id, values, old_obj=None):
- if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
- data = values.copy()
- enabled_value = data.pop('enabled', None)
- ref = super(EnabledEmuMixIn, self).update(object_id, data, old_obj)
- if enabled_value is not None:
- if enabled_value:
- self._add_enabled(object_id)
- else:
- self._remove_enabled(object_id)
- ref['enabled'] = enabled_value
- return ref
- else:
- return super(EnabledEmuMixIn, self).update(
- object_id, values, old_obj)
-
- def delete(self, object_id):
- if self.enabled_emulation:
- self._remove_enabled(object_id)
- super(EnabledEmuMixIn, self).delete(object_id)
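Editor's note: the mix-in above never stores 'enabled' on the entry itself; enablement is purely membership in one group entry. A minimal sketch of the two modifications involved (hypothetical DNs, assuming options_name='user' and the default emulation DN):

import ldap

member = 'cn=alice,ou=Users,dc=example,dc=com'
group = 'cn=enabled_users,ou=Users,dc=example,dc=com'
# What _add_enabled() and _remove_enabled() send for that member:
enable_mod = [(ldap.MOD_ADD, 'member', [member])]
disable_mod = [(ldap.MOD_DELETE, 'member', [member])]
# conn.modify_s(group, enable_mod)  /  conn.modify_s(group, disable_mod)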
diff --git a/keystone-moon/keystone/common/manager.py b/keystone-moon/keystone/common/manager.py
deleted file mode 100644
index 4ce9f2a6..00000000
--- a/keystone-moon/keystone/common/manager.py
+++ /dev/null
@@ -1,220 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import functools
-import inspect
-import time
-import types
-
-from oslo_log import log
-from oslo_log import versionutils
-from oslo_utils import importutils
-from oslo_utils import reflection
-import six
-import stevedore
-
-from keystone.i18n import _
-
-
-LOG = log.getLogger(__name__)
-
-
-def response_truncated(f):
- """Truncate the list returned by the wrapped function.
-
- This is designed to wrap Manager list_{entity} methods to ensure that
- any list limits that are defined are passed to the driver layer. If a
- hints list is provided, the wrapper will insert the relevant limit into
- the hints so that the underlying driver call can try and honor it. If the
- driver does truncate the response, it will update the 'truncated' attribute
- in the 'limit' entry in the hints list, which enables the caller of this
- function to know if truncation has taken place. If, however, the driver
- layer is unable to perform truncation, the 'limit' entry is simply left in
- the hints list for the caller to handle.
-
- A _get_list_limit() method is required to be present in the object class
- hierarchy, which returns the limit for this backend to which we will
- truncate.
-
- If a hints list is not provided in the arguments of the wrapped call then
- any limits set in the config file are ignored. This allows internal use
- of such wrapped methods where the entire data set is needed as input for
- the calculations of some other API (e.g. get role assignments for a given
- project).
-
- """
- @functools.wraps(f)
- def wrapper(self, *args, **kwargs):
- if kwargs.get('hints') is None:
- return f(self, *args, **kwargs)
-
- list_limit = self.driver._get_list_limit()
- if list_limit:
- kwargs['hints'].set_limit(list_limit)
- return f(self, *args, **kwargs)
- return wrapper
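# Editor's note (illustration, hypothetical manager): typical usage is
#
#     class WidgetManager(Manager):
#         @response_truncated
#         def list_widgets(self, hints=None):
#             return self.driver.list_widgets(hints)
#
# If the backend defines a list limit of 100 and the caller passes a hints
# object, the wrapper calls hints.set_limit(100) before invoking the
# driver; calls that omit hints bypass the limit entirely.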
-
-
-def load_driver(namespace, driver_name, *args):
- try:
- driver_manager = stevedore.DriverManager(namespace,
- driver_name,
- invoke_on_load=True,
- invoke_args=args)
- return driver_manager.driver
- except RuntimeError as e:
- LOG.debug('Failed to load %r using stevedore: %s', driver_name, e)
- # Ignore failure and continue on.
-
- driver = importutils.import_object(driver_name, *args)
-
- msg = (_(
- 'Direct import of driver %(name)r is deprecated as of Liberty in '
- 'favor of its entrypoint from %(namespace)r and may be removed in '
- 'N.') %
- {'name': driver_name, 'namespace': namespace})
- versionutils.report_deprecated_feature(LOG, msg)
-
- return driver
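# Editor's note (illustration): load_driver('keystone.identity', 'ldap')
# resolves the 'ldap' entrypoint from the 'keystone.identity' stevedore
# namespace, while a full class path such as
# 'keystone.identity.backends.ldap.Identity' falls through to the direct
# import branch and emits the deprecation warning above.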
-
-
-class _TraceMeta(type):
- """A metaclass that, in trace mode, will log entry and exit of methods.
-
- This metaclass automatically wraps all methods on the class when
- instantiated with a decorator that will log entry/exit from a method
- when keystone is run in Trace log level.
- """
-
- @staticmethod
- def wrapper(__f, __classname):
- __argspec = inspect.getargspec(__f)
- __fn_info = '%(module)s.%(classname)s.%(funcname)s' % {
- 'module': inspect.getmodule(__f).__name__,
- 'classname': __classname,
- 'funcname': __f.__name__
- }
-        # NOTE(morganfainberg): Omit "cls" and "self" when printing trace
-        # logs; the index can be calculated at wrap time rather than at
-        # runtime.
- if __argspec.args and __argspec.args[0] in ('self', 'cls'):
- __arg_idx = 1
- else:
- __arg_idx = 0
-
- @functools.wraps(__f)
- def wrapped(*args, **kwargs):
- __exc = None
- __t = time.time()
- __do_trace = LOG.logger.getEffectiveLevel() <= log.TRACE
- __ret_val = None
- try:
- if __do_trace:
- LOG.trace('CALL => %s', __fn_info)
- __ret_val = __f(*args, **kwargs)
- except Exception as e: # nosec
- __exc = e
- raise
- finally:
- if __do_trace:
- __subst = {
- 'run_time': (time.time() - __t),
- 'passed_args': ', '.join([
- ', '.join([repr(a)
- for a in args[__arg_idx:]]),
- ', '.join(['%(k)s=%(v)r' % {'k': k, 'v': v}
- for k, v in kwargs.items()]),
- ]),
- 'function': __fn_info,
- 'exception': __exc,
- 'ret_val': __ret_val,
- }
- if __exc is not None:
- __msg = ('[%(run_time)ss] %(function)s '
- '(%(passed_args)s) => raised '
- '%(exception)r')
- else:
- # TODO(morganfainberg): find a way to indicate if this
- # was a cache hit or cache miss.
- __msg = ('[%(run_time)ss] %(function)s'
- '(%(passed_args)s) => %(ret_val)r')
- LOG.trace(__msg, __subst)
- return __ret_val
- return wrapped
-
- def __new__(meta, classname, bases, class_dict):
- final_cls_dict = {}
- for attr_name, attr in class_dict.items():
- # NOTE(morganfainberg): only wrap public instances and methods.
- if (isinstance(attr, types.FunctionType) and
- not attr_name.startswith('_')):
- attr = _TraceMeta.wrapper(attr, classname)
- final_cls_dict[attr_name] = attr
- return type.__new__(meta, classname, bases, final_cls_dict)
-
-
-@six.add_metaclass(_TraceMeta)
-class Manager(object):
- """Base class for intermediary request layer.
-
- The Manager layer exists to support additional logic that applies to all
- or some of the methods exposed by a service that are not specific to the
- HTTP interface.
-
- It also provides a stable entry point to dynamic backends.
-
- An example of a probable use case is logging all the calls.
-
- """
-
- driver_namespace = None
-
- def __init__(self, driver_name):
- self.driver = load_driver(self.driver_namespace, driver_name)
-
- def __getattr__(self, name):
- """Forward calls to the underlying driver."""
- f = getattr(self.driver, name)
- setattr(self, name, f)
- return f
-
-
-def create_legacy_driver(driver_class):
- """Helper function to deprecate the original driver classes.
-
- The keystone.{subsystem}.Driver classes are deprecated in favor of the
- new versioned classes. This function creates a new class based on a
- versioned class and adds a deprecation message when it is used.
-
- This will allow existing custom drivers to work when the Driver class is
- renamed to include a version.
-
- Example usage:
-
- Driver = create_legacy_driver(CatalogDriverV8)
-
- """
- module_name = driver_class.__module__
- class_name = reflection.get_class_name(driver_class)
-
- class Driver(driver_class):
-
- @versionutils.deprecated(
- as_of=versionutils.deprecated.LIBERTY,
- what='%s.Driver' % module_name,
- in_favor_of=class_name,
- remove_in=+2)
- def __init__(self, *args, **kwargs):
- super(Driver, self).__init__(*args, **kwargs)
-
- return Driver
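Editor's note: a subsystem plugs into this module by subclassing Manager with a driver_namespace; a minimal sketch under assumed names (the 'keystone.example' namespace and its 'sql' driver are hypothetical):

from keystone.common import manager


class ExampleManager(manager.Manager):

    driver_namespace = 'keystone.example'  # hypothetical stevedore namespace


example = ExampleManager('sql')
# Attribute lookups not defined on the manager are forwarded to (and then
# cached from) the loaded driver by __getattr__, so example.list_widgets()
# would call the driver method directly.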
diff --git a/keystone-moon/keystone/common/models.py b/keystone-moon/keystone/common/models.py
deleted file mode 100644
index de996522..00000000
--- a/keystone-moon/keystone/common/models.py
+++ /dev/null
@@ -1,196 +0,0 @@
-# Copyright (C) 2011 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Base model for keystone internal services
-
-Unless marked otherwise, all fields are strings.
-
-"""
-
-
-class Model(dict):
- """Base model class."""
-
- def __hash__(self):
- return self['id'].__hash__()
-
- @property
-    def known_keys(self):
-        return self.required_keys + self.optional_keys
-
-
-class Token(Model):
- """Token object.
-
- Required keys:
- id
- expires (datetime)
-
-    Optional keys:
-        extra (holds additional data such as user, tenant, metadata
-        and trust_id)
- """
-
- required_keys = ('id', 'expires')
- optional_keys = ('extra',)
-
-
-class Service(Model):
- """Service object.
-
- Required keys:
- id
- type
- name
-
- Optional keys:
- """
-
- required_keys = ('id', 'type', 'name')
- optional_keys = tuple()
-
-
-class Endpoint(Model):
- """Endpoint object
-
- Required keys:
- id
- region
- service_id
-
- Optional keys:
- internalurl
- publicurl
- adminurl
- """
-
- required_keys = ('id', 'region', 'service_id')
- optional_keys = ('internalurl', 'publicurl', 'adminurl')
-
-
-class User(Model):
- """User object.
-
- Required keys:
- id
- name
- domain_id
-
- Optional keys:
- password
- description
- email
- enabled (bool, default True)
- default_project_id
- """
-
- required_keys = ('id', 'name', 'domain_id')
- optional_keys = ('password', 'description', 'email', 'enabled',
- 'default_project_id')
-
-
-class Group(Model):
- """Group object.
-
- Required keys:
- id
- name
- domain_id
-
-    Optional keys:
-        description
-
- """
-
- required_keys = ('id', 'name', 'domain_id')
- optional_keys = ('description',)
-
-
-class Project(Model):
- """Project object.
-
- Required keys:
- id
- name
- domain_id
-
-    Optional keys:
- description
- enabled (bool, default True)
- is_domain (bool, default False)
-
- """
-
- required_keys = ('id', 'name', 'domain_id')
- optional_keys = ('description', 'enabled', 'is_domain')
-
-
-class Role(Model):
- """Role object.
-
- Required keys:
- id
- name
-
- """
-
- required_keys = ('id', 'name')
- optional_keys = tuple()
-
-
-class ImpliedRole(Model):
- """ImpliedRole object.
-
- Required keys:
- prior_role_id
- implied_role_id
- """
-
- required_keys = ('prior_role_id', 'implied_role_id')
- optional_keys = tuple()
-
-
-class Trust(Model):
- """Trust object.
-
- Required keys:
- id
- trustor_user_id
- trustee_user_id
- project_id
- """
-
- required_keys = ('id', 'trustor_user_id', 'trustee_user_id', 'project_id')
- optional_keys = ('expires_at',)
-
-
-class Domain(Model):
- """Domain object.
-
- Required keys:
- id
- name
-
-    Optional keys:
-        description
-        enabled (bool, default True)
-
- """
-
- required_keys = ('id', 'name')
- optional_keys = ('description', 'enabled')
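Editor's note: since every model is a dict subclass, construction and access are plain dict operations; a short sketch:

from keystone.common import models

user = models.User(id='u1', name='alice', domain_id='default')
assert user['name'] == 'alice'      # ordinary dict access
assert 'email' in user.known_keys   # required_keys + optional_keys
user['enabled'] = True              # nothing enforces the key lists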
diff --git a/keystone-moon/keystone/common/openssl.py b/keystone-moon/keystone/common/openssl.py
deleted file mode 100644
index 0bea6d8e..00000000
--- a/keystone-moon/keystone/common/openssl.py
+++ /dev/null
@@ -1,337 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import os
-
-from oslo_config import cfg
-from oslo_log import log
-
-from keystone.common import environment
-from keystone.common import utils
-from keystone.i18n import _LI, _LE, _LW
-
-LOG = log.getLogger(__name__)
-CONF = cfg.CONF
-
-PUBLIC_DIR_PERMS = 0o755 # -rwxr-xr-x
-PRIVATE_DIR_PERMS = 0o750 # -rwxr-x---
-PUBLIC_FILE_PERMS = 0o644 # -rw-r--r--
-PRIVATE_FILE_PERMS = 0o640 # -rw-r-----
-
-
-def file_exists(file_path):
- return os.path.exists(file_path)
-
-
-class BaseCertificateConfigure(object):
- """Create a certificate signing environment.
-
- This is based on a config section and reasonable OpenSSL defaults.
-
- """
-
- def __init__(self, conf_obj, server_conf_obj, keystone_user,
- keystone_group, rebuild, **kwargs):
- self.conf_dir = os.path.dirname(server_conf_obj.ca_certs)
- self.use_keystone_user = keystone_user
- self.use_keystone_group = keystone_group
- self.rebuild = rebuild
- self.ssl_config_file_name = os.path.join(self.conf_dir, "openssl.conf")
- self.request_file_name = os.path.join(self.conf_dir, "req.pem")
- self.ssl_dictionary = {'conf_dir': self.conf_dir,
- 'ca_cert': server_conf_obj.ca_certs,
- 'default_md': 'default',
- 'ssl_config': self.ssl_config_file_name,
- 'ca_private_key': conf_obj.ca_key,
- 'request_file': self.request_file_name,
- 'signing_key': server_conf_obj.keyfile,
- 'signing_cert': server_conf_obj.certfile,
- 'key_size': int(conf_obj.key_size),
- 'valid_days': int(conf_obj.valid_days),
- 'cert_subject': conf_obj.cert_subject}
-
- try:
-            # OpenSSL 1.0 and newer support default_md = default;
-            # older versions do not.
- openssl_ver = environment.subprocess.check_output( # the arguments
- # are hardcoded and just check the openssl version
- ['openssl', 'version'])
- if b'OpenSSL 0.' in openssl_ver:
- self.ssl_dictionary['default_md'] = 'sha1'
- except environment.subprocess.CalledProcessError:
- LOG.warning(_LW('Failed to invoke ``openssl version``, '
-                            'assuming it is v1.0 or newer'))
- self.ssl_dictionary.update(kwargs)
-
- def exec_command(self, command):
- to_exec = [part % self.ssl_dictionary for part in command]
- LOG.info(_LI('Running command - %s'), ' '.join(to_exec))
- try:
- # NOTE(shaleh): use check_output instead of the simpler
- # `check_call()` in order to log any output from an error.
- environment.subprocess.check_output( # the arguments being passed
- # in are defined in this file and trusted to build CAs, keys
- # and certs
- to_exec,
- stderr=environment.subprocess.STDOUT)
- except environment.subprocess.CalledProcessError as e:
- LOG.error(_LE('Command %(to_exec)s exited with %(retcode)s '
- '- %(output)s'),
- {'to_exec': to_exec,
- 'retcode': e.returncode,
- 'output': e.output})
- raise e
-
- def clean_up_existing_files(self):
- files_to_clean = [self.ssl_dictionary['ca_private_key'],
- self.ssl_dictionary['ca_cert'],
- self.ssl_dictionary['signing_key'],
- self.ssl_dictionary['signing_cert'],
- ]
-
- existing_files = []
-
- for file_path in files_to_clean:
- if file_exists(file_path):
- if self.rebuild:
- # The file exists but the user wants to rebuild it, so blow
- # it away
- try:
- os.remove(file_path)
- except OSError as exc:
- LOG.error(_LE('Failed to remove file %(file_path)r: '
- '%(error)s'),
- {'file_path': file_path,
- 'error': exc.strerror})
- raise
- else:
- existing_files.append(file_path)
-
- return existing_files
-
- def build_ssl_config_file(self):
- utils.make_dirs(os.path.dirname(self.ssl_config_file_name),
- mode=PUBLIC_DIR_PERMS,
- user=self.use_keystone_user,
- group=self.use_keystone_group, log=LOG)
- if not file_exists(self.ssl_config_file_name):
- with open(self.ssl_config_file_name, 'w') as ssl_config_file:
- ssl_config_file.write(self.sslconfig % self.ssl_dictionary)
- utils.set_permissions(self.ssl_config_file_name,
- mode=PRIVATE_FILE_PERMS,
- user=self.use_keystone_user,
- group=self.use_keystone_group, log=LOG)
-
- index_file_name = os.path.join(self.conf_dir, 'index.txt')
- if not file_exists(index_file_name):
- with open(index_file_name, 'w') as index_file:
- index_file.write('')
- utils.set_permissions(index_file_name,
- mode=PRIVATE_FILE_PERMS,
- user=self.use_keystone_user,
- group=self.use_keystone_group, log=LOG)
-
- serial_file_name = os.path.join(self.conf_dir, 'serial')
- if not file_exists(serial_file_name):
- with open(serial_file_name, 'w') as index_file:
- index_file.write('01')
- utils.set_permissions(serial_file_name,
- mode=PRIVATE_FILE_PERMS,
- user=self.use_keystone_user,
- group=self.use_keystone_group, log=LOG)
-
- def build_ca_cert(self):
- ca_key_file = self.ssl_dictionary['ca_private_key']
- utils.make_dirs(os.path.dirname(ca_key_file),
- mode=PRIVATE_DIR_PERMS,
- user=self.use_keystone_user,
- group=self.use_keystone_group, log=LOG)
- if not file_exists(ca_key_file):
- self.exec_command(['openssl', 'genrsa',
- '-out', '%(ca_private_key)s',
- '%(key_size)d'])
- utils.set_permissions(ca_key_file,
- mode=PRIVATE_FILE_PERMS,
- user=self.use_keystone_user,
- group=self.use_keystone_group, log=LOG)
-
- ca_cert = self.ssl_dictionary['ca_cert']
- utils.make_dirs(os.path.dirname(ca_cert),
- mode=PUBLIC_DIR_PERMS,
- user=self.use_keystone_user,
- group=self.use_keystone_group, log=LOG)
- if not file_exists(ca_cert):
- self.exec_command(['openssl', 'req', '-new', '-x509',
- '-extensions', 'v3_ca',
- '-key', '%(ca_private_key)s',
- '-out', '%(ca_cert)s',
- '-days', '%(valid_days)d',
- '-config', '%(ssl_config)s',
- '-subj', '%(cert_subject)s'])
- utils.set_permissions(ca_cert,
- mode=PUBLIC_FILE_PERMS,
- user=self.use_keystone_user,
- group=self.use_keystone_group, log=LOG)
-
- def build_private_key(self):
- signing_keyfile = self.ssl_dictionary['signing_key']
- utils.make_dirs(os.path.dirname(signing_keyfile),
- mode=PRIVATE_DIR_PERMS,
- user=self.use_keystone_user,
- group=self.use_keystone_group, log=LOG)
- if not file_exists(signing_keyfile):
- self.exec_command(['openssl', 'genrsa', '-out', '%(signing_key)s',
- '%(key_size)d'])
- utils.set_permissions(signing_keyfile,
- mode=PRIVATE_FILE_PERMS,
- user=self.use_keystone_user,
- group=self.use_keystone_group, log=LOG)
-
- def build_signing_cert(self):
- signing_cert = self.ssl_dictionary['signing_cert']
-
- utils.make_dirs(os.path.dirname(signing_cert),
- mode=PUBLIC_DIR_PERMS,
- user=self.use_keystone_user,
- group=self.use_keystone_group, log=LOG)
- if not file_exists(signing_cert):
- self.exec_command(['openssl', 'req', '-key', '%(signing_key)s',
- '-new', '-out', '%(request_file)s',
- '-config', '%(ssl_config)s',
- '-subj', '%(cert_subject)s'])
-
- self.exec_command(['openssl', 'ca', '-batch',
- '-out', '%(signing_cert)s',
- '-config', '%(ssl_config)s',
-                           '-days', '%(valid_days)d',
- '-cert', '%(ca_cert)s',
- '-keyfile', '%(ca_private_key)s',
- '-infiles', '%(request_file)s'])
-
- def run(self):
- try:
- existing_files = self.clean_up_existing_files()
- except OSError:
- print('An error occurred when rebuilding cert files.')
- return
- if existing_files:
- print('The following cert files already exist, use --rebuild to '
- 'remove the existing files before regenerating:')
- for f in existing_files:
- print('%s already exists' % f)
- return
-
- self.build_ssl_config_file()
- self.build_ca_cert()
- self.build_private_key()
- self.build_signing_cert()
-
-
-class ConfigurePKI(BaseCertificateConfigure):
- """Generate files for PKI signing using OpenSSL.
-
-    Signed tokens require a private key and a signing certificate, which
-    itself must be signed by a CA. This class generates them with workable
-    defaults if the files are not already present.
-
- """
-
- def __init__(self, keystone_user, keystone_group, rebuild=False):
- super(ConfigurePKI, self).__init__(CONF.signing, CONF.signing,
- keystone_user, keystone_group,
- rebuild=rebuild)
-
-
-class ConfigureSSL(BaseCertificateConfigure):
- """Generate files for HTTPS using OpenSSL.
-
- Creates a public/private key and certificates. If a CA is not given
- one will be generated using provided arguments.
- """
-
- def __init__(self, keystone_user, keystone_group, rebuild=False):
- super(ConfigureSSL, self).__init__(CONF.ssl, CONF.eventlet_server_ssl,
- keystone_user, keystone_group,
- rebuild=rebuild)
-
-
-BaseCertificateConfigure.sslconfig = """
-# OpenSSL configuration file.
-#
-
-# Establish working directory.
-
-dir = %(conf_dir)s
-
-[ ca ]
-default_ca = CA_default
-
-[ CA_default ]
-new_certs_dir = $dir
-serial = $dir/serial
-database = $dir/index.txt
-default_days = 365
-default_md = %(default_md)s
-preserve = no
-email_in_dn = no
-nameopt = default_ca
-certopt = default_ca
-policy = policy_anything
-x509_extensions = usr_cert
-unique_subject = no
-
-[ policy_anything ]
-countryName = optional
-stateOrProvinceName = optional
-organizationName = optional
-organizationalUnitName = optional
-commonName = supplied
-emailAddress = optional
-
-[ req ]
-default_bits = 2048 # Size of keys
-default_keyfile = key.pem # name of generated keys
-string_mask = utf8only # permitted characters
-distinguished_name = req_distinguished_name
-req_extensions = v3_req
-x509_extensions = v3_ca
-
-[ req_distinguished_name ]
-countryName = Country Name (2 letter code)
-countryName_min = 2
-countryName_max = 2
-stateOrProvinceName = State or Province Name (full name)
-localityName = Locality Name (city, district)
-0.organizationName = Organization Name (company)
-organizationalUnitName = Organizational Unit Name (department, division)
-commonName = Common Name (hostname, IP, or your name)
-commonName_max = 64
-emailAddress = Email Address
-emailAddress_max = 64
-
-[ v3_ca ]
-basicConstraints = CA:TRUE
-subjectKeyIdentifier = hash
-authorityKeyIdentifier = keyid:always,issuer
-
-[ v3_req ]
-basicConstraints = CA:FALSE
-keyUsage = nonRepudiation, digitalSignature, keyEncipherment
-
-[ usr_cert ]
-basicConstraints = CA:FALSE
-subjectKeyIdentifier = hash
-authorityKeyIdentifier = keyid:always
-"""
diff --git a/keystone-moon/keystone/common/pemutils.py b/keystone-moon/keystone/common/pemutils.py
deleted file mode 100755
index ddbe05cf..00000000
--- a/keystone-moon/keystone/common/pemutils.py
+++ /dev/null
@@ -1,509 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-"""
-PEM formatted data is used frequently in conjunction with X509 PKI as
-a data exchange mechanism for binary data. The acronym PEM stands for
-Privacy Enhanced Mail as defined in RFC-1421. Contrary to expectation
-the PEM format in common use has little to do with RFC-1421. Instead
-what we know as PEM format grew out of the need for a data exchange
-mechanism largely by the influence of OpenSSL. Other X509
-implementations have adopted it.
-
-Unfortunately PEM format has never been officially standardized. Its
-basic format is as follows:
-
-1) A header consisting of 5 hyphens followed by the word BEGIN and a
-single space, then an upper case string describing the contents of the
-PEM block, followed by 5 hyphens and a newline.
-
-2) Binary data (typically in DER ASN.1 format) encoded in base64. The
-base64 text is line wrapped so that each line of base64 is 64
-characters long and terminated with a newline. The last line of base64
-text may be less than 64 characters. The content and format of the
-binary data is entirely dependent upon the type of data announced in
-the header and footer.
-
-3) A footer in the exact same format as the header, except that the word
-BEGIN is replaced by END. The content name in both the header and footer
-should exactly match.
-
-The above is called a PEM block. It is permissible for multiple PEM
-blocks to appear in a single file or block of text. This is often used
-when specifying multiple X509 certificates.
-
-An example PEM block for a certificate is:
-
------BEGIN CERTIFICATE-----
-MIIC0TCCAjqgAwIBAgIJANsHKV73HYOwMA0GCSqGSIb3DQEBBQUAMIGeMQowCAYD
-VQQFEwE1MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVN1bm55
-dmFsZTESMBAGA1UEChMJT3BlblN0YWNrMREwDwYDVQQLEwhLZXlzdG9uZTElMCMG
-CSqGSIb3DQEJARYWa2V5c3RvbmVAb3BlbnN0YWNrLm9yZzEUMBIGA1UEAxMLU2Vs
-ZiBTaWduZWQwIBcNMTIxMTA1MTgxODI0WhgPMjA3MTA0MzAxODE4MjRaMIGeMQow
-CAYDVQQFEwE1MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVN1
-bm55dmFsZTESMBAGA1UEChMJT3BlblN0YWNrMREwDwYDVQQLEwhLZXlzdG9uZTEl
-MCMGCSqGSIb3DQEJARYWa2V5c3RvbmVAb3BlbnN0YWNrLm9yZzEUMBIGA1UEAxML
-U2VsZiBTaWduZWQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALzI17ExCaqd
-r7xY2Q5CBZ1bW1lsrXxS8eNJRdQtskDuQVAluY03/OGZd8HQYiiY/ci2tYy7BNIC
-bh5GaO95eqTDykJR3liOYE/tHbY6puQlj2ZivmhlSd2d5d7lF0/H28RQsLu9VktM
-uw6q9DpDm35jfrr8LgSeA3MdVqcS/4OhAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMB
-Af8wDQYJKoZIhvcNAQEFBQADgYEAjSQND7i1dNZtLKpWgX+JqMr3BdVlM15mFeVr
-C26ZspZjZVY5okdozO9gU3xcwRe4Cg30sKFOe6EBQKpkTZucFOXwBtD3h6dWJrdD
-c+m/CL/rs0GatDavbaIT2vv405SQUQooCdVh72LYel+4/a6xmRd7fQx3iEXN9QYj
-vmHJUcA=
------END CERTIFICATE-----
-
-PEM format is safe for transmission in 7-bit ASCII systems
-(i.e. standard email). Since 7-bit ASCII is a proper subset of UTF-8
-and Latin-1 it is not affected by transcoding between those
-charsets. Nor is PEM format affected by the choice of line
-endings. This makes PEM format particularly attractive for transport
-and storage of binary data.
-
-This module provides a number of utilities supporting the generation
-and consumption of PEM formatted data including:
-
- * parse text and find all PEM blocks contained in the
- text. Information on the location of the block in the text, the
-    type of PEM block, and its base64 and binary data contents.
-
- * parse text assumed to contain PEM data and return the binary
- data.
-
- * test if a block of text is a PEM block
-
- * convert base64 text into a formatted PEM block
-
- * convert binary data into a formatted PEM block
-
- * access to the valid PEM types and their headers
-
-"""
-
-import base64
-import re
-
-import six
-
-from keystone.common import base64utils
-from keystone.i18n import _
-
-
-PEM_TYPE_TO_HEADER = {
- u'cms': u'CMS',
- u'dsa-private': u'DSA PRIVATE KEY',
- u'dsa-public': u'DSA PUBLIC KEY',
- u'ecdsa-public': u'ECDSA PUBLIC KEY',
- u'ec-private': u'EC PRIVATE KEY',
- u'pkcs7': u'PKCS7',
- u'pkcs7-signed': u'PKCS',
- u'pkcs8': u'ENCRYPTED PRIVATE KEY',
- u'private-key': u'PRIVATE KEY',
- u'public-key': u'PUBLIC KEY',
- u'rsa-private': u'RSA PRIVATE KEY',
- u'rsa-public': u'RSA PUBLIC KEY',
- u'cert': u'CERTIFICATE',
- u'crl': u'X509 CRL',
- u'cert-pair': u'CERTIFICATE PAIR',
- u'csr': u'CERTIFICATE REQUEST',
-}
-
-# This is not a 1-to-1 reverse map of PEM_TYPE_TO_HEADER
-# because it includes deprecated headers that map to 1 pem_type.
-PEM_HEADER_TO_TYPE = {
- u'CMS': u'cms',
- u'DSA PRIVATE KEY': u'dsa-private',
- u'DSA PUBLIC KEY': u'dsa-public',
- u'ECDSA PUBLIC KEY': u'ecdsa-public',
- u'EC PRIVATE KEY': u'ec-private',
- u'PKCS7': u'pkcs7',
- u'PKCS': u'pkcs7-signed',
- u'ENCRYPTED PRIVATE KEY': u'pkcs8',
- u'PRIVATE KEY': u'private-key',
- u'PUBLIC KEY': u'public-key',
- u'RSA PRIVATE KEY': u'rsa-private',
- u'RSA PUBLIC KEY': u'rsa-public',
- u'CERTIFICATE': u'cert',
- u'X509 CERTIFICATE': u'cert',
- u'CERTIFICATE PAIR': u'cert-pair',
- u'X509 CRL': u'crl',
- u'CERTIFICATE REQUEST': u'csr',
- u'NEW CERTIFICATE REQUEST': u'csr',
-}
-
-# List of valid pem_types
-pem_types = sorted(PEM_TYPE_TO_HEADER.keys())
-
-# List of valid pem_headers
-pem_headers = sorted(PEM_TYPE_TO_HEADER.values())
-
-_pem_begin_re = re.compile(r'^-{5}BEGIN\s+([^-]+)-{5}\s*$', re.MULTILINE)
-_pem_end_re = re.compile(r'^-{5}END\s+([^-]+)-{5}\s*$', re.MULTILINE)
-
-
-class PEMParseResult(object):
- """Information returned when a PEM block is found in text.
-
- PEMParseResult contains information about a PEM block discovered
- while parsing text. The following properties are defined:
-
- pem_type
- A short hand name for the type of the PEM data, e.g. cert,
- csr, crl, cms, key. Valid pem_types are listed in pem_types.
- When the pem_type is set the pem_header is updated to match it.
-
- pem_header
- The text following '-----BEGIN ' in the PEM header.
- Common examples are:
-
- -----BEGIN CERTIFICATE-----
- -----BEGIN CMS-----
-
- Thus the pem_header would be CERTIFICATE and CMS respectively.
- When the pem_header is set the pem_type is updated to match it.
-
- pem_start, pem_end
- The beginning and ending positions of the PEM block
- including the PEM header and footer.
-
- base64_start, base64_end
- The beginning and ending positions of the base64 data
-        contained inside the PEM header and footer. Includes the
-        trailing newline.
-
- binary_data
- The decoded base64 data. None if not decoded.
-
- """
-
- def __init__(self, pem_type=None, pem_header=None,
- pem_start=None, pem_end=None,
- base64_start=None, base64_end=None,
- binary_data=None):
-
- self._pem_type = None
- self._pem_header = None
-
- if pem_type is not None:
- self.pem_type = pem_type
-
- if pem_header is not None:
- self.pem_header = pem_header
-
- self.pem_start = pem_start
- self.pem_end = pem_end
- self.base64_start = base64_start
- self.base64_end = base64_end
- self.binary_data = binary_data
-
- @property
- def pem_type(self):
- return self._pem_type
-
- @pem_type.setter
- def pem_type(self, pem_type):
- if pem_type is None:
- self._pem_type = None
- self._pem_header = None
- else:
- pem_header = PEM_TYPE_TO_HEADER.get(pem_type)
- if pem_header is None:
- raise ValueError(_('unknown pem_type "%(pem_type)s", '
- 'valid types are: %(valid_pem_types)s') %
- {'pem_type': pem_type,
- 'valid_pem_types': ', '.join(pem_types)})
- self._pem_type = pem_type
- self._pem_header = pem_header
-
- @property
- def pem_header(self):
- return self._pem_header
-
- @pem_header.setter
- def pem_header(self, pem_header):
- if pem_header is None:
- self._pem_type = None
- self._pem_header = None
- else:
- pem_type = PEM_HEADER_TO_TYPE.get(pem_header)
- if pem_type is None:
- raise ValueError(_('unknown pem header "%(pem_header)s", '
- 'valid headers are: '
- '%(valid_pem_headers)s') %
- {'pem_header': pem_header,
- 'valid_pem_headers':
- ', '.join("'%s'" %
- [x for x in pem_headers])})
-
- self._pem_type = pem_type
- self._pem_header = pem_header
-
-
-def pem_search(text, start=0):
- """Search for a block of PEM formatted data
-
- Search for a PEM block in a text string. The search begins at
- start. If a PEM block is found a PEMParseResult object is
- returned, otherwise if no PEM block is found None is returned.
-
- If the pem_type is not the same in both the header and footer
- a ValueError is raised.
-
- The start and end positions are suitable for use as slices into
- the text. To search for multiple PEM blocks pass pem_end as the
- start position for the next iteration. Terminate the iteration
- when None is returned. Example::
-
- start = 0
- while True:
- block = pem_search(text, start)
- if block is None:
- break
- base64_data = text[block.base64_start : block.base64_end]
- start = block.pem_end
-
- :param text: the text to search for PEM blocks
- :type text: string
- :param start: the position in text to start searching from (default: 0)
- :type start: int
- :returns: PEMParseResult or None if not found
- :raises: ValueError
- """
-
- match = _pem_begin_re.search(text, pos=start)
- if match:
- pem_start = match.start()
- begin_text = match.group(0)
- base64_start = min(len(text), match.end() + 1)
- begin_pem_header = match.group(1).strip()
-
- match = _pem_end_re.search(text, pos=base64_start)
- if match:
- pem_end = min(len(text), match.end() + 1)
- base64_end = match.start()
- end_pem_header = match.group(1).strip()
- else:
- raise ValueError(_('failed to find end matching "%s"') %
- begin_text)
-
- if begin_pem_header != end_pem_header:
- raise ValueError(_('beginning & end PEM headers do not match '
- '(%(begin_pem_header)s'
- '!= '
- '%(end_pem_header)s)') %
- {'begin_pem_header': begin_pem_header,
- 'end_pem_header': end_pem_header})
- else:
- return None
-
- result = PEMParseResult(pem_header=begin_pem_header,
- pem_start=pem_start, pem_end=pem_end,
- base64_start=base64_start, base64_end=base64_end)
-
- return result
-
-
-def parse_pem(text, pem_type=None, max_items=None):
- """Scan text for PEM data, return list of PEM items
-
- The input text is scanned for PEM blocks, for each one found a
- PEMParseResult is constructed and added to the return list.
-
- pem_type operates as a filter on the type of PEM desired. If
- pem_type is specified only those PEM blocks which match will be
- included. The pem_type is a logical name, not the actual text in
- the pem header (e.g. 'cert'). If the pem_type is None all PEM
- blocks are returned.
-
- If max_items is specified the result is limited to that number of
- items.
-
- The return value is a list of PEMParseResult objects. The
- PEMParseResult provides complete information about the PEM block
- including the decoded binary data for the PEM block. The list is
- ordered in the same order as found in the text.
-
- Examples::
-
- # Get all certs
- certs = parse_pem(text, 'cert')
-
- # Get the first cert
- try:
- binary_cert = parse_pem(text, 'cert', 1)[0].binary_data
- except IndexError:
- raise ValueError('no cert found')
-
- :param text: The text to search for PEM blocks
- :type text: string
- :param pem_type: Only return data for this pem_type.
- Valid types are: csr, cert, crl, cms, key.
- If pem_type is None no filtering is performed.
- (default: None)
- :type pem_type: string or None
- :param max_items: Limit the number of blocks returned. (default: None)
- :type max_items: int or None
- :return: List of PEMParseResult, one for each PEM block found
- :raises: ValueError, InvalidBase64Error
- """
-
- pem_blocks = []
- start = 0
-
- while True:
- block = pem_search(text, start)
- if block is None:
- break
- start = block.pem_end
- if pem_type is None:
- pem_blocks.append(block)
- else:
- try:
- if block.pem_type == pem_type:
- pem_blocks.append(block)
- except KeyError:
- raise ValueError(_('unknown pem_type: "%s"') % (pem_type))
-
- if max_items is not None and len(pem_blocks) >= max_items:
- break
-
- for block in pem_blocks:
- base64_data = text[block.base64_start:block.base64_end]
- try:
- binary_data = base64.b64decode(base64_data)
- except Exception as e:
- block.binary_data = None
- raise base64utils.InvalidBase64Error(
-                _('failed to base64 decode %(pem_type)s PEM at position '
-                  '%(position)d: %(err_msg)s') %
- {'pem_type': block.pem_type,
- 'position': block.pem_start,
- 'err_msg': six.text_type(e)})
- else:
- block.binary_data = binary_data
-
- return pem_blocks
-
-
-def get_pem_data(text, pem_type='cert'):
- """Scan text for PEM data, return binary contents
-
- The input text is scanned for a PEM block which matches the pem_type.
- If found the binary data contained in the PEM block is returned.
- If no PEM block is found or it does not match the specified pem type
- None is returned.
-
- :param text: The text to search for the PEM block
- :type text: string
- :param pem_type: Only return data for this pem_type.
- Valid types are: csr, cert, crl, cms, key.
- (default: 'cert')
- :type pem_type: string
- :return: binary data or None if not found.
- """
-
- blocks = parse_pem(text, pem_type, 1)
- if not blocks:
- return None
- return blocks[0].binary_data
-
-
-def is_pem(text, pem_type='cert'):
- """Does this text contain a PEM block.
-
- Check for the existence of a PEM formatted block in the
- text, if one is found verify it's contents can be base64
- decoded, if so return True. Return False otherwise.
-
- :param text: The text to search for PEM blocks
- :type text: string
- :param pem_type: Only return data for this pem_type.
- Valid types are: csr, cert, crl, cms, key.
- (default: 'cert')
- :type pem_type: string
- :returns: bool -- True if text contains PEM matching the pem_type,
- False otherwise.
- """
-
- try:
- pem_blocks = parse_pem(text, pem_type, max_items=1)
- except base64utils.InvalidBase64Error:
- return False
-
- if pem_blocks:
- return True
- else:
- return False
-
-
-def base64_to_pem(base64_text, pem_type='cert'):
- """Format string of base64 text into PEM format
-
- Input is assumed to consist only of members of the base64 alphabet
-    (i.e. no whitespace). Use one of the filter functions from
-    base64utils to ensure the input is clean
-    (e.g. strip_whitespace()).
-
- :param base64_text: text containing ONLY base64 alphabet
- characters to be inserted into PEM output.
- :type base64_text: string
- :param pem_type: Produce a PEM block for this type.
- Valid types are: csr, cert, crl, cms, key.
- (default: 'cert')
- :type pem_type: string
- :returns: string -- PEM formatted text
-
-
- """
- pem_header = PEM_TYPE_TO_HEADER[pem_type]
- buf = six.StringIO()
-
- buf.write(u'-----BEGIN %s-----' % pem_header)
- buf.write(u'\n')
-
- for line in base64utils.base64_wrap_iter(base64_text, width=64):
- buf.write(line)
- buf.write(u'\n')
-
- buf.write(u'-----END %s-----' % pem_header)
- buf.write(u'\n')
-
- text = buf.getvalue()
- buf.close()
- return text
-
-
-def binary_to_pem(binary_data, pem_type='cert'):
- """Format binary data into PEM format
-
- Example:
-
- # get the certificate binary data in DER format
- der_data = certificate.der
- # convert the DER binary data into a PEM
- pem = binary_to_pem(der_data, 'cert')
-
-
- :param binary_data: binary data to encapsulate into PEM
- :type binary_data: buffer
- :param pem_type: Produce a PEM block for this type.
- Valid types are: csr, cert, crl, cms, key.
- (default: 'cert')
- :type pem_type: string
- :returns: string -- PEM formatted text
-
- """
- base64_text = base64.b64encode(binary_data)
- return base64_to_pem(base64_text, pem_type)
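Editor's note: the functions above compose into a simple round trip; a sketch (under the Python 2 string semantics this module targets):

from keystone.common import pemutils

der = b'0\x03\x02\x01\x01'  # arbitrary DER-like bytes for illustration
pem = pemutils.binary_to_pem(der, pem_type='cert')
assert pemutils.is_pem(pem, pem_type='cert')
assert pemutils.get_pem_data(pem, pem_type='cert') == der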
diff --git a/keystone-moon/keystone/common/router.py b/keystone-moon/keystone/common/router.py
deleted file mode 100644
index 74e03ad2..00000000
--- a/keystone-moon/keystone/common/router.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from keystone.common import json_home
-from keystone.common import wsgi
-
-
-class Router(wsgi.ComposableRouter):
- def __init__(self, controller, collection_key, key,
- resource_descriptions=None,
- is_entity_implemented=True,
- method_template=None):
- self.controller = controller
- self.key = key
- self.collection_key = collection_key
- self._resource_descriptions = resource_descriptions
- self._is_entity_implemented = is_entity_implemented
- self.method_template = method_template or '%s'
-
- def add_routes(self, mapper):
- collection_path = '/%(collection_key)s' % {
- 'collection_key': self.collection_key}
- entity_path = '/%(collection_key)s/{%(key)s_id}' % {
- 'collection_key': self.collection_key,
- 'key': self.key}
-
- mapper.connect(
- collection_path,
- controller=self.controller,
- action=self.method_template % 'create_%s' % self.key,
- conditions=dict(method=['POST']))
- mapper.connect(
- collection_path,
- controller=self.controller,
- action=self.method_template % 'list_%s' % self.collection_key,
- conditions=dict(method=['GET']))
- mapper.connect(
- entity_path,
- controller=self.controller,
- action=self.method_template % 'get_%s' % self.key,
- conditions=dict(method=['GET']))
- mapper.connect(
- entity_path,
- controller=self.controller,
- action=self.method_template % 'update_%s' % self.key,
- conditions=dict(method=['PATCH']))
- mapper.connect(
- entity_path,
- controller=self.controller,
- action=self.method_template % 'delete_%s' % self.key,
- conditions=dict(method=['DELETE']))
-
- # Add the collection resource and entity resource to the resource
- # descriptions.
-
- collection_rel = json_home.build_v3_resource_relation(
- self.collection_key)
- rel_data = {'href': collection_path, }
- self._resource_descriptions.append((collection_rel, rel_data))
-
- if self._is_entity_implemented:
- entity_rel = json_home.build_v3_resource_relation(self.key)
- id_str = '%s_id' % self.key
- id_param_rel = json_home.build_v3_parameter_relation(id_str)
- entity_rel_data = {
- 'href-template': entity_path,
- 'href-vars': {
- id_str: id_param_rel,
- },
- }
- self._resource_descriptions.append((entity_rel, entity_rel_data))
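For a Router built with collection_key='projects' and key='project', add_routes() wires up the usual CRUD mapping (POST /projects -> create_project, GET /projects/{project_id} -> get_project, and so on). A hedged sketch of the same wiring using the routes library directly; 'ctrl' is a placeholder controller, not a real keystone controller:

    import routes

    mapper = routes.Mapper()
    mapper.connect('/projects', controller='ctrl', action='create_project',
                   conditions=dict(method=['POST']))
    mapper.connect('/projects/{project_id}', controller='ctrl',
                   action='get_project', conditions=dict(method=['GET']))

    # Method conditions are evaluated against the WSGI environ.
    match = mapper.match('/projects/42', environ={'REQUEST_METHOD': 'GET'})
    print(match)
    # {'controller': 'ctrl', 'action': 'get_project', 'project_id': '42'}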
diff --git a/keystone-moon/keystone/common/sql/__init__.py b/keystone-moon/keystone/common/sql/__init__.py
deleted file mode 100644
index 84e0fb83..00000000
--- a/keystone-moon/keystone/common/sql/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from keystone.common.sql.core import * # noqa
diff --git a/keystone-moon/keystone/common/sql/core.py b/keystone-moon/keystone/common/sql/core.py
deleted file mode 100644
index cb026356..00000000
--- a/keystone-moon/keystone/common/sql/core.py
+++ /dev/null
@@ -1,434 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""SQL backends for the various services.
-
-Before using this module, call initialize(). This has to be done before
-CONF() because initialize() registers the database configuration options.
-
-"""
-import functools
-
-from oslo_config import cfg
-from oslo_db import exception as db_exception
-from oslo_db import options as db_options
-from oslo_db.sqlalchemy import enginefacade
-from oslo_db.sqlalchemy import models
-from oslo_log import log
-from oslo_serialization import jsonutils
-import six
-import sqlalchemy as sql
-from sqlalchemy.ext import declarative
-from sqlalchemy.orm.attributes import flag_modified, InstrumentedAttribute
-from sqlalchemy import types as sql_types
-
-from keystone.common import driver_hints
-from keystone.common import utils
-from keystone import exception
-from keystone.i18n import _
-
-
-CONF = cfg.CONF
-LOG = log.getLogger(__name__)
-
-ModelBase = declarative.declarative_base()
-
-
-# For exporting to other modules
-Column = sql.Column
-Index = sql.Index
-String = sql.String
-Integer = sql.Integer
-Enum = sql.Enum
-ForeignKey = sql.ForeignKey
-DateTime = sql.DateTime
-IntegrityError = sql.exc.IntegrityError
-DBDuplicateEntry = db_exception.DBDuplicateEntry
-OperationalError = sql.exc.OperationalError
-NotFound = sql.orm.exc.NoResultFound
-Boolean = sql.Boolean
-Text = sql.Text
-UniqueConstraint = sql.UniqueConstraint
-PrimaryKeyConstraint = sql.PrimaryKeyConstraint
-joinedload = sql.orm.joinedload
-# Suppress flake8's unused import warning for flag_modified:
-flag_modified = flag_modified
-
-
-def initialize():
- """Initialize the module."""
- db_options.set_defaults(
- CONF,
- connection="sqlite:///keystone.db")
-
-
-def initialize_decorator(init):
- """Ensure that the length of string field do not exceed the limit.
-
- This decorator check the initialize arguments, to make sure the
- length of string field do not exceed the length limit, or raise a
- 'StringLengthExceeded' exception.
-
- Use decorator instead of inheritance, because the metaclass will
- check the __tablename__, primary key columns, etc. at the class
- definition.
-
- """
- def initialize(self, *args, **kwargs):
- cls = type(self)
- for k, v in kwargs.items():
- if hasattr(cls, k):
- attr = getattr(cls, k)
- if isinstance(attr, InstrumentedAttribute):
- column = attr.property.columns[0]
- if isinstance(column.type, String):
- if not isinstance(v, six.text_type):
- v = six.text_type(v)
- if column.type.length and column.type.length < len(v):
- raise exception.StringLengthExceeded(
- string=v, type=k, length=column.type.length)
-
- init(self, *args, **kwargs)
- return initialize
-
-ModelBase.__init__ = initialize_decorator(ModelBase.__init__)
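A short sketch of what the wrapped __init__ buys: constructing a model with an oversized string fails fast instead of at flush time. The 'Sketch' model is illustrative only, assuming keystone.common.sql is importable:

    from keystone.common import sql

    class Sketch(sql.ModelBase):
        __tablename__ = 'sketch'
        id = sql.Column(sql.String(64), primary_key=True)

    Sketch(id='x' * 65)  # raises keystone.exception.StringLengthExceeded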
-
-
-# Special Fields
-class JsonBlob(sql_types.TypeDecorator):
-
- impl = sql.Text
-
- def process_bind_param(self, value, dialect):
- return jsonutils.dumps(value)
-
- def process_result_value(self, value, dialect):
- return jsonutils.loads(value)
-
-
-class DictBase(models.ModelBase):
- attributes = []
-
- @classmethod
- def from_dict(cls, d):
- new_d = d.copy()
-
- new_d['extra'] = {k: new_d.pop(k) for k in six.iterkeys(d)
- if k not in cls.attributes and k != 'extra'}
-
- return cls(**new_d)
-
- def to_dict(self, include_extra_dict=False):
- """Returns the model's attributes as a dictionary.
-
- If include_extra_dict is True, 'extra' attributes are literally
- included in the resulting dictionary twice, for backwards-compatibility
- with a broken implementation.
-
- """
- d = self.extra.copy()
- for attr in self.__class__.attributes:
- d[attr] = getattr(self, attr)
-
- if include_extra_dict:
- d['extra'] = self.extra.copy()
-
- return d
-
- def __getitem__(self, key):
- if key in self.extra:
- return self.extra[key]
- return getattr(self, key)
-
-
-class ModelDictMixin(object):
-
- @classmethod
- def from_dict(cls, d):
- """Returns a model instance from a dictionary."""
- return cls(**d)
-
- def to_dict(self):
- """Returns the model's attributes as a dictionary."""
- names = (column.name for column in self.__table__.columns)
- return {name: getattr(self, name) for name in names}
-
-
-_main_context_manager = None
-
-
-def _get_main_context_manager():
- global _main_context_manager
-
- if not _main_context_manager:
- _main_context_manager = enginefacade.transaction_context()
-
- return _main_context_manager
-
-
-def cleanup():
- global _main_context_manager
-
- _main_context_manager = None
-
-
-_CONTEXT = None
-
-
-def _get_context():
- global _CONTEXT
- if _CONTEXT is None:
- # NOTE(dims): Delay the `threading.local` import to allow for
- # eventlet/gevent monkeypatching to happen
- import threading
- _CONTEXT = threading.local()
- return _CONTEXT
-
-
-def session_for_read():
- return _get_main_context_manager().reader.using(_get_context())
-
-
-def session_for_write():
- return _get_main_context_manager().writer.using(_get_context())
-
-
-def truncated(f):
- return driver_hints.truncated(f)
-
-
-class _WontMatch(Exception):
- """Raised to indicate that the filter won't match.
-
- This is raised to short-circuit the computation of the filter as soon as
- it's discovered that the filter requested isn't going to match anything.
-
- A filter isn't going to match anything if the value is too long for the
- field, for example.
-
- """
-
- @classmethod
- def check(cls, value, col_attr):
- """Check if the value can match given the column attributes.
-
- Raises this class if the provided value can't match any value in
- the column, given the column's attributes. For example, if the
- column is a string and the value is longer than the column's
- maximum length, it can't match any stored value.
-
- """
- col = col_attr.property.columns[0]
- if isinstance(col.type, sql.types.Boolean):
- # The column is a Boolean, we should have already validated input.
- return
- if not col.type.length:
- # The column doesn't have a length so can't validate anymore.
- return
- if len(value) > col.type.length:
- raise cls()
- # Otherwise the value could match a value in the column.
-
-
-def _filter(model, query, hints):
- """Applies filtering to a query.
-
- :param model: the table model in question
- :param query: query to apply filters to
- :param hints: contains the list of filters yet to be satisfied.
- Any filters satisfied here will be removed so that
- the caller will know if any filters remain.
-
- :returns: query, updated with any filters satisfied
-
- """
- def inexact_filter(model, query, filter_, satisfied_filters):
- """Applies an inexact filter to a query.
-
- :param model: the table model in question
- :param query: query to apply filters to
- :param dict filter_: describes this filter
- :param list satisfied_filters: filter_ will be added if it is
- satisfied.
-
- :returns: query updated to add any inexact filters we could
- satisfy
-
- """
- column_attr = getattr(model, filter_['name'])
-
- # TODO(henry-nash): Sqlalchemy 0.7 defaults to case insensitivity
- # so once we find a way of changing that (maybe on a call-by-call
- # basis), we can add support for the case sensitive versions of
- # the filters below. For now, these case sensitive versions will
- # be handled at the controller level.
-
- if filter_['case_sensitive']:
- return query
-
- if filter_['comparator'] == 'contains':
- _WontMatch.check(filter_['value'], column_attr)
- query_term = column_attr.ilike('%%%s%%' % filter_['value'])
- elif filter_['comparator'] == 'startswith':
- _WontMatch.check(filter_['value'], column_attr)
- query_term = column_attr.ilike('%s%%' % filter_['value'])
- elif filter_['comparator'] == 'endswith':
- _WontMatch.check(filter_['value'], column_attr)
- query_term = column_attr.ilike('%%%s' % filter_['value'])
- else:
- # It's a filter we don't understand, so let the caller
- # work out if they need to do something with it.
- return query
-
- satisfied_filters.append(filter_)
- return query.filter(query_term)
-
- def exact_filter(model, query, filter_, satisfied_filters):
- """Applies an exact filter to a query.
-
- :param model: the table model in question
- :param query: query to apply filters to
- :param dict filter_: describes this filter
- :param list satisfied_filters: filter_ will be added if it is
- satisfied.
- :returns: query updated to add any exact filters we could
- satisfy
- """
- key = filter_['name']
-
- col = getattr(model, key)
- if isinstance(col.property.columns[0].type, sql.types.Boolean):
- filter_val = utils.attr_as_boolean(filter_['value'])
- else:
- _WontMatch.check(filter_['value'], col)
- filter_val = filter_['value']
-
- satisfied_filters.append(filter_)
- return query.filter(col == filter_val)
-
- try:
- satisfied_filters = []
- for filter_ in hints.filters:
- if filter_['name'] not in model.attributes:
- continue
- if filter_['comparator'] == 'equals':
- query = exact_filter(model, query, filter_,
- satisfied_filters)
- else:
- query = inexact_filter(model, query, filter_,
- satisfied_filters)
-
- # Remove satisfied filters so the caller knows which filters remain
- for filter_ in satisfied_filters:
- hints.filters.remove(filter_)
-
- return query
- except _WontMatch:
- hints.cannot_match = True
- return
-
-
-def _limit(query, hints):
- """Applies a limit to a query.
-
- :param query: query to apply filters to
- :param hints: contains the list of filters and limit details.
-
- :returns: updated query
-
- """
- # NOTE(henry-nash): If we were to implement pagination, then we
- # would expand this method to support pagination and limiting.
-
- # If we satisfied all the filters, set an upper limit if supplied
- if hints.limit:
- query = query.limit(hints.limit['limit'])
- return query
-
-
-def filter_limit_query(model, query, hints):
- """Applies filtering and limit to a query.
-
- :param model: table model
- :param query: query to apply filters to
- :param hints: contains the list of filters and limit details. This may
- be None, indicating that there are no filters or limits
- to be applied. If it's not None, then any filters
- satisfied here will be removed so that the caller will
- know if any filters remain.
-
- :returns: updated query
-
- """
- if hints is None:
- return query
-
- # First try and satisfy any filters
- query = _filter(model, query, hints)
-
- if hints.cannot_match:
- # Nothing's going to match, so don't bother with the query.
- return []
-
- # NOTE(henry-nash): Any unsatisfied filters will have been left in
- # the hints list for the controller to handle. We can only try and
- # limit here if all the filters are already satisfied since, if not,
- # doing so might mess up the final results. If there are still
- # unsatisfied filters, we have to leave any limiting to the controller
- # as well.
-
- if not hints.filters:
- return _limit(query, hints)
- else:
- return query
-
-
-def handle_conflicts(conflict_type='object'):
- """Converts select sqlalchemy exceptions into HTTP 409 Conflict."""
- _conflict_msg = 'Conflict %(conflict_type)s: %(details)s'
-
- def decorator(method):
- @functools.wraps(method)
- def wrapper(*args, **kwargs):
- try:
- return method(*args, **kwargs)
- except db_exception.DBDuplicateEntry as e:
- # LOG the exception for debug purposes, do not send the
- # exception details out with the raised Conflict exception
- # as it can contain raw SQL.
- LOG.debug(_conflict_msg, {'conflict_type': conflict_type,
- 'details': six.text_type(e)})
- raise exception.Conflict(type=conflict_type,
- details=_('Duplicate Entry'))
- except db_exception.DBError as e:
- # TODO(blk-u): inspecting inner_exception breaks encapsulation;
- # oslo_db should provide the exception we need.
- if isinstance(e.inner_exception, IntegrityError):
- # LOG the exception for debug purposes, do not send the
- # exception details out with the raised Conflict exception
- # as it can contain raw SQL.
- LOG.debug(_conflict_msg, {'conflict_type': conflict_type,
- 'details': six.text_type(e)})
- # NOTE(morganfainberg): This is really a case where the SQL
- # failed to store the data. This is not something that the
- # user has done wrong. Example would be a ForeignKey is
- # missing; the code that is executed before reaching the
- # SQL writing to the DB should catch the issue.
- raise exception.UnexpectedError(
- _('An unexpected error occurred when trying to '
- 'store %s') % conflict_type)
- raise
-
- return wrapper
- return decorator
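A hedged usage sketch of how a backend driver ties these pieces together: open a reader session, apply driver hints, and let any unsatisfied filters fall through to the controller. The 'User' model is invented for illustration, and the Hints add_filter() API is assumed from keystone.common.driver_hints; this is not code from this file:

    from keystone.common import driver_hints
    from keystone.common import sql

    class User(sql.ModelBase, sql.DictBase):
        __tablename__ = 'user_sketch'          # illustrative table name
        attributes = ['id', 'name']
        id = sql.Column(sql.String(64), primary_key=True)
        name = sql.Column(sql.String(255))
        extra = sql.Column(sql.JsonBlob())

    def list_users(hints=None):
        hints = hints or driver_hints.Hints()
        with sql.session_for_read() as session:
            query = session.query(User)
            # Returns [] when a filter can't possibly match
            # (hints.cannot_match); iterating it then yields nothing.
            query = sql.filter_limit_query(User, query, hints)
            return [ref.to_dict() for ref in query]

    hints = driver_hints.Hints()
    hints.add_filter('name', 'adm', comparator='startswith')  # ilike('adm%')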
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/README b/keystone-moon/keystone/common/sql/migrate_repo/README
deleted file mode 100644
index 4ea8dd4f..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/README
+++ /dev/null
@@ -1,4 +0,0 @@
-This is a database migration repository.
-
-More information at
-https://git.openstack.org/cgit/openstack/sqlalchemy-migrate
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/__init__.py b/keystone-moon/keystone/common/sql/migrate_repo/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/__init__.py
+++ /dev/null
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/manage.py b/keystone-moon/keystone/common/sql/migrate_repo/manage.py
deleted file mode 100644
index 39fa3892..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/manage.py
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env python
-from migrate.versioning.shell import main
-
-if __name__ == '__main__':
- main(debug='False')
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/migrate.cfg b/keystone-moon/keystone/common/sql/migrate_repo/migrate.cfg
deleted file mode 100644
index db531bb4..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/migrate.cfg
+++ /dev/null
@@ -1,25 +0,0 @@
-[db_settings]
-# Used to identify which repository this database is versioned under.
-# You can use the name of your project.
-repository_id=keystone
-
-# The name of the database table used to track the schema version.
-# This name shouldn't already be used by your project.
-# If this is changed once a database is under version control, you'll need to
-# change the table name in each database too.
-version_table=migrate_version
-
-# When committing a change script, Migrate will attempt to generate the
-# sql for all supported databases; normally, if one of them fails - probably
-# because you don't have that database installed - it is ignored and the
-# commit continues, perhaps ending successfully.
-# Databases in this list MUST compile successfully during a commit, or the
-# entire commit will fail. List the databases your application will actually
-# be using to ensure your updates to that database work properly.
-# This must be a list; example: ['postgres','sqlite']
-required_dbs=[]
-
-# When creating new change scripts, Migrate will stamp the new script with
-# a version number. By default this is latest_version + 1. You can set this
-# to 'true' to tell Migrate to use the UTC timestamp instead.
-use_timestamp_numbering=False
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/067_kilo.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/067_kilo.py
deleted file mode 100644
index a6dbed67..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/067_kilo.py
+++ /dev/null
@@ -1,317 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import migrate
-from oslo_log import log
-import sqlalchemy as sql
-
-from keystone.assignment.backends import sql as assignment_sql
-from keystone.common import sql as ks_sql
-from keystone.identity.mapping_backends import mapping as mapping_backend
-
-
-LOG = log.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- if migrate_engine.name == 'mysql':
- # In Folsom we explicitly converted migrate_version to UTF8.
- migrate_engine.execute(
- 'ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8')
- # Set default DB charset to UTF8.
- migrate_engine.execute(
- 'ALTER DATABASE %s DEFAULT CHARACTER SET utf8' %
- migrate_engine.url.database)
-
- credential = sql.Table(
- 'credential', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('user_id', sql.String(length=64), nullable=False),
- sql.Column('project_id', sql.String(length=64)),
- sql.Column('blob', ks_sql.JsonBlob, nullable=False),
- sql.Column('type', sql.String(length=255), nullable=False),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- domain = sql.Table(
- 'domain', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('name', sql.String(length=64), nullable=False),
- sql.Column('enabled', sql.Boolean, default=True, nullable=False),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- endpoint = sql.Table(
- 'endpoint', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('legacy_endpoint_id', sql.String(length=64)),
- sql.Column('interface', sql.String(length=8), nullable=False),
- sql.Column('service_id', sql.String(length=64), nullable=False),
- sql.Column('url', sql.Text, nullable=False),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- sql.Column('enabled', sql.Boolean, nullable=False, default=True,
- server_default='1'),
- sql.Column('region_id', sql.String(length=255), nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- group = sql.Table(
- 'group', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('domain_id', sql.String(length=64), nullable=False),
- sql.Column('name', sql.String(length=64), nullable=False),
- sql.Column('description', sql.Text),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- policy = sql.Table(
- 'policy', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('type', sql.String(length=255), nullable=False),
- sql.Column('blob', ks_sql.JsonBlob, nullable=False),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- project = sql.Table(
- 'project', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('name', sql.String(length=64), nullable=False),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- sql.Column('description', sql.Text),
- sql.Column('enabled', sql.Boolean),
- sql.Column('domain_id', sql.String(length=64), nullable=False),
- sql.Column('parent_id', sql.String(64), nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- role = sql.Table(
- 'role', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('name', sql.String(length=255), nullable=False),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- service = sql.Table(
- 'service', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('type', sql.String(length=255)),
- sql.Column('enabled', sql.Boolean, nullable=False, default=True,
- server_default='1'),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- token = sql.Table(
- 'token', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('expires', sql.DateTime, default=None),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- sql.Column('valid', sql.Boolean, default=True, nullable=False),
- sql.Column('trust_id', sql.String(length=64)),
- sql.Column('user_id', sql.String(length=64)),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- trust = sql.Table(
- 'trust', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('trustor_user_id', sql.String(length=64), nullable=False),
- sql.Column('trustee_user_id', sql.String(length=64), nullable=False),
- sql.Column('project_id', sql.String(length=64)),
- sql.Column('impersonation', sql.Boolean, nullable=False),
- sql.Column('deleted_at', sql.DateTime),
- sql.Column('expires_at', sql.DateTime),
- sql.Column('remaining_uses', sql.Integer, nullable=True),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- trust_role = sql.Table(
- 'trust_role', meta,
- sql.Column('trust_id', sql.String(length=64), primary_key=True,
- nullable=False),
- sql.Column('role_id', sql.String(length=64), primary_key=True,
- nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- user = sql.Table(
- 'user', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('name', sql.String(length=255), nullable=False),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- sql.Column('password', sql.String(length=128)),
- sql.Column('enabled', sql.Boolean),
- sql.Column('domain_id', sql.String(length=64), nullable=False),
- sql.Column('default_project_id', sql.String(length=64)),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- user_group_membership = sql.Table(
- 'user_group_membership', meta,
- sql.Column('user_id', sql.String(length=64), primary_key=True),
- sql.Column('group_id', sql.String(length=64), primary_key=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- region = sql.Table(
- 'region',
- meta,
- sql.Column('id', sql.String(255), primary_key=True),
- sql.Column('description', sql.String(255), nullable=False),
- sql.Column('parent_region_id', sql.String(255), nullable=True),
- sql.Column('extra', sql.Text()),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- assignment = sql.Table(
- 'assignment',
- meta,
- sql.Column('type', sql.Enum(
- assignment_sql.AssignmentType.USER_PROJECT,
- assignment_sql.AssignmentType.GROUP_PROJECT,
- assignment_sql.AssignmentType.USER_DOMAIN,
- assignment_sql.AssignmentType.GROUP_DOMAIN,
- name='type'),
- nullable=False),
- sql.Column('actor_id', sql.String(64), nullable=False),
- sql.Column('target_id', sql.String(64), nullable=False),
- sql.Column('role_id', sql.String(64), nullable=False),
- sql.Column('inherited', sql.Boolean, default=False, nullable=False),
- sql.PrimaryKeyConstraint('type', 'actor_id', 'target_id', 'role_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- mapping = sql.Table(
- 'id_mapping',
- meta,
- sql.Column('public_id', sql.String(64), primary_key=True),
- sql.Column('domain_id', sql.String(64), nullable=False),
- sql.Column('local_id', sql.String(64), nullable=False),
- sql.Column('entity_type', sql.Enum(
- mapping_backend.EntityType.USER,
- mapping_backend.EntityType.GROUP,
- name='entity_type'),
- nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- domain_config_whitelist = sql.Table(
- 'whitelisted_config',
- meta,
- sql.Column('domain_id', sql.String(64), primary_key=True),
- sql.Column('group', sql.String(255), primary_key=True),
- sql.Column('option', sql.String(255), primary_key=True),
- sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- domain_config_sensitive = sql.Table(
- 'sensitive_config',
- meta,
- sql.Column('domain_id', sql.String(64), primary_key=True),
- sql.Column('group', sql.String(255), primary_key=True),
- sql.Column('option', sql.String(255), primary_key=True),
- sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- # create all tables
- tables = [credential, domain, endpoint, group, policy, project, role,
- service, token, trust, trust_role, user, user_group_membership,
- region, assignment, mapping, domain_config_whitelist,
- domain_config_sensitive]
-
- for table in tables:
- try:
- table.create()
- except Exception:
- LOG.exception('Exception while creating table: %r', table)
- raise
-
- # Unique Constraints
- migrate.UniqueConstraint(user.c.domain_id,
- user.c.name,
- name='ixu_user_name_domain_id').create()
- migrate.UniqueConstraint(group.c.domain_id,
- group.c.name,
- name='ixu_group_name_domain_id').create()
- migrate.UniqueConstraint(role.c.name,
- name='ixu_role_name').create()
- migrate.UniqueConstraint(project.c.domain_id,
- project.c.name,
- name='ixu_project_name_domain_id').create()
- migrate.UniqueConstraint(domain.c.name,
- name='ixu_domain_name').create()
- migrate.UniqueConstraint(mapping.c.domain_id,
- mapping.c.local_id,
- mapping.c.entity_type,
- name='domain_id').create()
-
- # Indexes
- sql.Index('ix_token_expires', token.c.expires).create()
- sql.Index('ix_token_expires_valid', token.c.expires,
- token.c.valid).create()
- sql.Index('ix_actor_id', assignment.c.actor_id).create()
- sql.Index('ix_token_user_id', token.c.user_id).create()
- sql.Index('ix_token_trust_id', token.c.trust_id).create()
- # NOTE(stevemar): The two indexes below were named 'service_id' and
- # 'group_id' in 050_fk_consistent_indexes.py, and need to be preserved.
- sql.Index('service_id', endpoint.c.service_id).create()
- sql.Index('group_id', user_group_membership.c.group_id).create()
-
- fkeys = [
- {'columns': [endpoint.c.service_id],
- 'references': [service.c.id]},
-
- {'columns': [user_group_membership.c.group_id],
- 'references': [group.c.id],
- 'name': 'fk_user_group_membership_group_id'},
-
- {'columns': [user_group_membership.c.user_id],
- 'references': [user.c.id],
- 'name': 'fk_user_group_membership_user_id'},
-
- {'columns': [project.c.domain_id],
- 'references': [domain.c.id],
- 'name': 'fk_project_domain_id'},
-
- {'columns': [endpoint.c.region_id],
- 'references': [region.c.id],
- 'name': 'fk_endpoint_region_id'},
-
- {'columns': [project.c.parent_id],
- 'references': [project.c.id],
- 'name': 'project_parent_id_fkey'},
- ]
-
- if migrate_engine.name == 'sqlite':
- # NOTE(stevemar): We need to keep this FK constraint due to 073, but
- # only for sqlite; once we collapse 073 we can remove this constraint
- fkeys.append(
- {'columns': [assignment.c.role_id],
- 'references': [role.c.id],
- 'name': 'fk_assignment_role_id'})
-
- for fkey in fkeys:
- migrate.ForeignKeyConstraint(columns=fkey['columns'],
- refcolumns=fkey['references'],
- name=fkey.get('name')).create()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/068_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/068_placeholder.py
deleted file mode 100644
index 111df9d4..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/068_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Kilo backports. Do not use this number for new
-# Liberty work. New Liberty work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/069_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/069_placeholder.py
deleted file mode 100644
index 111df9d4..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/069_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Kilo backports. Do not use this number for new
-# Liberty work. New Liberty work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/070_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/070_placeholder.py
deleted file mode 100644
index 111df9d4..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/070_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Kilo backports. Do not use this number for new
-# Liberty work. New Liberty work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/071_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/071_placeholder.py
deleted file mode 100644
index 111df9d4..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/071_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Kilo backports. Do not use this number for new
-# Liberty work. New Liberty work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/072_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/072_placeholder.py
deleted file mode 100644
index 111df9d4..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/072_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Kilo backports. Do not use this number for new
-# Liberty work. New Liberty work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py
deleted file mode 100644
index 205f809e..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-from sqlalchemy.orm import sessionmaker
-
-from keystone.assignment.backends import sql as assignment_sql
-
-
-def upgrade(migrate_engine):
- """Inserts inherited column to assignment table PK constraints.
-
- For non-SQLite databases, it changes the constraint in the existing table.
-
- For SQLite, since changing constraints is not supported, it recreates the
- assignment table with the new PK constraint and migrates the existing data.
-
- """
- ASSIGNMENT_TABLE_NAME = 'assignment'
-
- metadata = sql.MetaData()
- metadata.bind = migrate_engine
-
- # Retrieve the existing assignment table
- assignment_table = sql.Table(ASSIGNMENT_TABLE_NAME, metadata,
- autoload=True)
-
- if migrate_engine.name == 'sqlite':
- ACTOR_ID_INDEX_NAME = 'ix_actor_id'
- TMP_ASSIGNMENT_TABLE_NAME = 'tmp_assignment'
-
- # Define the new assignment table with a temporary name
- new_assignment_table = sql.Table(
- TMP_ASSIGNMENT_TABLE_NAME, metadata,
- sql.Column('type', sql.Enum(
- assignment_sql.AssignmentType.USER_PROJECT,
- assignment_sql.AssignmentType.GROUP_PROJECT,
- assignment_sql.AssignmentType.USER_DOMAIN,
- assignment_sql.AssignmentType.GROUP_DOMAIN,
- name='type'),
- nullable=False),
- sql.Column('actor_id', sql.String(64), nullable=False),
- sql.Column('target_id', sql.String(64), nullable=False),
- sql.Column('role_id', sql.String(64), sql.ForeignKey('role.id'),
- nullable=False),
- sql.Column('inherited', sql.Boolean, default=False,
- nullable=False),
- sql.PrimaryKeyConstraint('type', 'actor_id', 'target_id',
- 'role_id', 'inherited'),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- # Create the new assignment table
- new_assignment_table.create(migrate_engine, checkfirst=True)
-
- # Change the index from the existing assignment table to the new one
- sql.Index(ACTOR_ID_INDEX_NAME, assignment_table.c.actor_id).drop()
- sql.Index(ACTOR_ID_INDEX_NAME,
- new_assignment_table.c.actor_id).create()
-
- # Instantiate session
- maker = sessionmaker(bind=migrate_engine)
- session = maker()
-
- # Migrate existing data
- insert = new_assignment_table.insert().from_select(
- assignment_table.c, select=session.query(assignment_table))
- session.execute(insert)
- session.commit()
-
- # Drop the existing assignment table, in favor of the new one
- assignment_table.deregister()
- assignment_table.drop()
-
- # Finally, rename the new table to the original assignment table name
- new_assignment_table.rename(ASSIGNMENT_TABLE_NAME)
- elif migrate_engine.name == 'ibm_db_sa':
- # Recreate the existing constraint, marking the inherited column as PK
- # for DB2.
-
- # This is a workaround for the general case in the else branch below.
- # Due to a bug in the DB2 sqlalchemy dialect, Column.alter() actually
- # creates a primary key over only the "inherited" column. This is wrong
- # because the primary key for the table actually covers other columns
- # too, not just the "inherited" column. Since the primary key already
- # exists for the table after the Column.alter() call, it causes the
- # next line to fail with an error that the primary key already exists.
-
- # The workaround here skips doing the Column.alter(). This causes a
- # warning message since the metadata is out of sync. We can remove this
- # workaround once the DB2 sqlalchemy dialect is fixed.
- # DB2 Issue: https://code.google.com/p/ibm-db/issues/detail?id=173
-
- migrate.PrimaryKeyConstraint(table=assignment_table).drop()
- migrate.PrimaryKeyConstraint(
- assignment_table.c.type, assignment_table.c.actor_id,
- assignment_table.c.target_id, assignment_table.c.role_id,
- assignment_table.c.inherited).create()
- else:
- # Recreate the existing constraint, marking the inherited column as PK
- migrate.PrimaryKeyConstraint(table=assignment_table).drop()
- assignment_table.c.inherited.alter(primary_key=True)
- migrate.PrimaryKeyConstraint(table=assignment_table).create()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/074_add_is_domain_project.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/074_add_is_domain_project.py
deleted file mode 100644
index dcb89b07..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/074_add_is_domain_project.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-_PROJECT_TABLE_NAME = 'project'
-_IS_DOMAIN_COLUMN_NAME = 'is_domain'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
- is_domain = sql.Column(_IS_DOMAIN_COLUMN_NAME, sql.Boolean, nullable=False,
- server_default='0', default=False)
- project_table.create_column(is_domain)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/075_confirm_config_registration.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/075_confirm_config_registration.py
deleted file mode 100644
index 576842c6..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/075_confirm_config_registration.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-REGISTRATION_TABLE = 'config_register'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- registration_table = sql.Table(
- REGISTRATION_TABLE,
- meta,
- sql.Column('type', sql.String(64), primary_key=True),
- sql.Column('domain_id', sql.String(64), nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- registration_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/076_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/076_placeholder.py
deleted file mode 100644
index 9f6e8415..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/076_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Liberty backports. Do not use this number for new
-# Mitaka work. New Mitaka work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/077_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/077_placeholder.py
deleted file mode 100644
index 9f6e8415..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/077_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Liberty backports. Do not use this number for new
-# Mitaka work. New Mitaka work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/078_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/078_placeholder.py
deleted file mode 100644
index 9f6e8415..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/078_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Liberty backports. Do not use this number for new
-# Mitaka work. New Mitaka work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/079_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/079_placeholder.py
deleted file mode 100644
index 9f6e8415..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/079_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Liberty backports. Do not use this number for new
-# Mitaka work. New Mitaka work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/080_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/080_placeholder.py
deleted file mode 100644
index 9f6e8415..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/080_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Liberty backports. Do not use this number for new
-# Mitaka work. New Mitaka work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/081_add_endpoint_policy_table.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/081_add_endpoint_policy_table.py
deleted file mode 100644
index a0c307d0..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/081_add_endpoint_policy_table.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common.sql import migration_helpers
-
-
-def upgrade(migrate_engine):
- try:
- extension_version = migration_helpers.get_db_version(
- extension='endpoint_policy',
- engine=migrate_engine)
- except Exception:
- extension_version = 0
-
- # This migration corresponds to endpoint_policy extension migration 1. Only
- # update if it has not been run.
- if extension_version >= 1:
- return
-
- # Upgrade operations go here. Don't create your own engine; bind
- # migrate_engine to your metadata
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- endpoint_policy_table = sql.Table(
- 'policy_association',
- meta,
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('policy_id', sql.String(64),
- nullable=False),
- sql.Column('endpoint_id', sql.String(64),
- nullable=True),
- sql.Column('service_id', sql.String(64),
- nullable=True),
- sql.Column('region_id', sql.String(64),
- nullable=True),
- sql.UniqueConstraint('endpoint_id', 'service_id', 'region_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- endpoint_policy_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/082_add_federation_tables.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/082_add_federation_tables.py
deleted file mode 100644
index 7e426373..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/082_add_federation_tables.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-import sqlalchemy as sql
-
-from keystone.common.sql import migration_helpers
-
-CONF = cfg.CONF
-_RELAY_STATE_PREFIX = 'relay_state_prefix'
-
-
-def upgrade(migrate_engine):
- try:
- extension_version = migration_helpers.get_db_version(
- extension='federation',
- engine=migrate_engine)
- except Exception:
- extension_version = 0
-
- # This migration corresponds to federation extension migration 8. Only
- # update if it has not been run.
- if extension_version >= 8:
- return
-
- # Upgrade operations go here. Don't create your own engine; bind
- # migrate_engine to your metadata
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- idp_table = sql.Table(
- 'identity_provider',
- meta,
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('enabled', sql.Boolean, nullable=False),
- sql.Column('description', sql.Text(), nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- idp_table.create(migrate_engine, checkfirst=True)
-
- federation_protocol_table = sql.Table(
- 'federation_protocol',
- meta,
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('idp_id', sql.String(64),
- sql.ForeignKey('identity_provider.id', ondelete='CASCADE'),
- primary_key=True),
- sql.Column('mapping_id', sql.String(64), nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- federation_protocol_table.create(migrate_engine, checkfirst=True)
-
- mapping_table = sql.Table(
- 'mapping',
- meta,
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('rules', sql.Text(), nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- mapping_table.create(migrate_engine, checkfirst=True)
-
- relay_state_prefix_default = CONF.saml.relay_state_prefix
- sp_table = sql.Table(
- 'service_provider',
- meta,
- sql.Column('auth_url', sql.String(256), nullable=False),
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('enabled', sql.Boolean, nullable=False),
- sql.Column('description', sql.Text(), nullable=True),
- sql.Column('sp_url', sql.String(256), nullable=False),
- sql.Column(_RELAY_STATE_PREFIX, sql.String(256), nullable=False,
- server_default=relay_state_prefix_default),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- sp_table.create(migrate_engine, checkfirst=True)
-
- idp_table = sql.Table('identity_provider', meta, autoload=True)
- remote_id_table = sql.Table(
- 'idp_remote_ids',
- meta,
- sql.Column('idp_id', sql.String(64),
- sql.ForeignKey('identity_provider.id', ondelete='CASCADE')),
- sql.Column('remote_id', sql.String(255), primary_key=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- remote_id_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/083_add_oauth1_tables.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/083_add_oauth1_tables.py
deleted file mode 100644
index 5a859b4b..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/083_add_oauth1_tables.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common.sql import migration_helpers
-
-
-def upgrade(migrate_engine):
- try:
- extension_version = migration_helpers.get_db_version(
- extension='oauth1',
- engine=migrate_engine)
- except Exception:
- extension_version = 0
-
- # This migration corresponds to oauth extension migration 5. Only
- # update if it has not been run.
- if extension_version >= 5:
- return
-
- # Upgrade operations go here. Don't create your own engine; bind
- # migrate_engine to your metadata
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- consumer_table = sql.Table(
- 'consumer',
- meta,
- sql.Column('id', sql.String(64), primary_key=True, nullable=False),
- sql.Column('description', sql.String(64), nullable=True),
- sql.Column('secret', sql.String(64), nullable=False),
- sql.Column('extra', sql.Text(), nullable=False))
- consumer_table.create(migrate_engine, checkfirst=True)
-
- request_token_table = sql.Table(
- 'request_token',
- meta,
- sql.Column('id', sql.String(64), primary_key=True, nullable=False),
- sql.Column('request_secret', sql.String(64), nullable=False),
- sql.Column('verifier', sql.String(64), nullable=True),
- sql.Column('authorizing_user_id', sql.String(64), nullable=True),
- sql.Column('requested_project_id', sql.String(64), nullable=False),
- sql.Column('role_ids', sql.Text(), nullable=True),
- sql.Column('consumer_id', sql.String(64),
- sql.ForeignKey('consumer.id'),
- nullable=False, index=True),
- sql.Column('expires_at', sql.String(64), nullable=True))
- request_token_table.create(migrate_engine, checkfirst=True)
-
- access_token_table = sql.Table(
- 'access_token',
- meta,
- sql.Column('id', sql.String(64), primary_key=True, nullable=False),
- sql.Column('access_secret', sql.String(64), nullable=False),
- sql.Column('authorizing_user_id', sql.String(64),
- nullable=False, index=True),
- sql.Column('project_id', sql.String(64), nullable=False),
- sql.Column('role_ids', sql.Text(), nullable=False),
- sql.Column('consumer_id', sql.String(64),
- sql.ForeignKey('consumer.id'),
- nullable=False, index=True),
- sql.Column('expires_at', sql.String(64), nullable=True))
- access_token_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/084_add_revoke_tables.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/084_add_revoke_tables.py
deleted file mode 100644
index 1a28a53c..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/084_add_revoke_tables.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common.sql import migration_helpers
-
-
-def upgrade(migrate_engine):
- try:
- extension_version = migration_helpers.get_db_version(
- extension='revoke',
- engine=migrate_engine)
- except Exception:
- extension_version = 0
-
- # This migration corresponds to revoke extension migration 2. Only
- # update if it has not been run.
- if extension_version >= 2:
- return
-
- # Upgrade operations go here. Don't create your own engine; bind
-    # migrate_engine to your metadata.
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
-    revocation_table = sql.Table(
- 'revocation_event',
- meta,
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('domain_id', sql.String(64)),
- sql.Column('project_id', sql.String(64)),
- sql.Column('user_id', sql.String(64)),
- sql.Column('role_id', sql.String(64)),
- sql.Column('trust_id', sql.String(64)),
- sql.Column('consumer_id', sql.String(64)),
- sql.Column('access_token_id', sql.String(64)),
- sql.Column('issued_before', sql.DateTime(), nullable=False),
- sql.Column('expires_at', sql.DateTime()),
- sql.Column('revoked_at', sql.DateTime(), index=True, nullable=False),
- sql.Column('audit_id', sql.String(32), nullable=True),
- sql.Column('audit_chain_id', sql.String(32), nullable=True))
-
-    revocation_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/085_add_endpoint_filtering_table.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/085_add_endpoint_filtering_table.py
deleted file mode 100644
index 5790bd98..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/085_add_endpoint_filtering_table.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common.sql import migration_helpers
-
-
-def upgrade(migrate_engine):
- try:
- extension_version = migration_helpers.get_db_version(
- extension='endpoint_filter',
- engine=migrate_engine)
- except Exception:
- extension_version = 0
-
- # This migration corresponds to endpoint_filter extension migration 2. Only
- # update if it has not been run.
- if extension_version >= 2:
- return
-
- # Upgrade operations go here. Don't create your own engine; bind
-    # migrate_engine to your metadata.
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- EP_GROUP_ID = 'endpoint_group_id'
- PROJECT_ID = 'project_id'
-
- endpoint_filtering_table = sql.Table(
- 'project_endpoint',
- meta,
- sql.Column(
- 'endpoint_id',
- sql.String(64),
- primary_key=True,
- nullable=False),
- sql.Column(
- 'project_id',
- sql.String(64),
- primary_key=True,
- nullable=False))
- endpoint_filtering_table.create(migrate_engine, checkfirst=True)
-
- endpoint_group_table = sql.Table(
- 'endpoint_group',
- meta,
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('name', sql.String(255), nullable=False),
- sql.Column('description', sql.Text, nullable=True),
- sql.Column('filters', sql.Text(), nullable=False))
- endpoint_group_table.create(migrate_engine, checkfirst=True)
-
- project_endpoint_group_table = sql.Table(
- 'project_endpoint_group',
- meta,
- sql.Column(EP_GROUP_ID, sql.String(64),
- sql.ForeignKey('endpoint_group.id'), nullable=False),
- sql.Column(PROJECT_ID, sql.String(64), nullable=False),
- sql.PrimaryKeyConstraint(EP_GROUP_ID, PROJECT_ID))
- project_endpoint_group_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/086_add_duplicate_constraint_trusts.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/086_add_duplicate_constraint_trusts.py
deleted file mode 100644
index 2b115ea4..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/086_add_duplicate_constraint_trusts.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2015 Intel Corporation
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import UniqueConstraint
-from sqlalchemy import MetaData, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- trusts = Table('trust', meta, autoload=True)
-
- UniqueConstraint('trustor_user_id', 'trustee_user_id', 'project_id',
- 'impersonation', 'expires_at', table=trusts,
- name='duplicate_trust_constraint').create()
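Worth noting about this migration: creating a unique constraint fails outright if duplicate rows already exist. A minimal pre-check sketch that could run first inside upgrade() (the query is an illustrative assumption, not part of the original migration; column names come from the constraint above):

    from sqlalchemy import MetaData, Table, func, select

    meta = MetaData(bind=migrate_engine)
    trusts = Table('trust', meta, autoload=True)
    cols = [trusts.c.trustor_user_id, trusts.c.trustee_user_id,
            trusts.c.project_id, trusts.c.impersonation, trusts.c.expires_at]
    # Any group of rows sharing all five columns would violate the
    # new unique constraint.
    dupes = select(cols + [func.count()]).group_by(*cols).having(
        func.count() > 1)
    if dupes.execute().fetchall():
        raise RuntimeError('resolve duplicate trusts before migrating')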
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/087_implied_roles.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/087_implied_roles.py
deleted file mode 100644
index 7713ce8f..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/087_implied_roles.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import migrate
-import sqlalchemy as sql
-
-
-ROLE_TABLE = 'role'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- implied_role = sql.Table(
- 'implied_role', meta,
- sql.Column('prior_role_id', sql.String(length=64), primary_key=True),
- sql.Column(
- 'implied_role_id', sql.String(length=64), primary_key=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- implied_role.create()
- role = sql.Table(ROLE_TABLE, meta, autoload=True)
- fkeys = [
- {'columns': [implied_role.c.prior_role_id],
- 'references': [role.c.id]},
- {'columns': [implied_role.c.implied_role_id],
- 'references': [role.c.id]},
- ]
- for fkey in fkeys:
- migrate.ForeignKeyConstraint(columns=fkey['columns'],
- refcolumns=fkey['references'],
- name=fkey.get('name')).create()
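The implied_role table stores directed edges (prior_role_id -> implied_role_id); the effective roles of an assignment are the transitive closure over those edges. A self-contained sketch of that expansion (plain Python, not keystone's actual resolver):

    def expand_implied_roles(direct_role_ids, implied_edges):
        # implied_edges: dict mapping prior_role_id -> iterable of
        # implied_role_ids, i.e. the rows of the implied_role table.
        effective = set(direct_role_ids)
        stack = list(direct_role_ids)
        while stack:
            role_id = stack.pop()
            for implied_id in implied_edges.get(role_id, ()):
                if implied_id not in effective:
                    effective.add(implied_id)
                    stack.append(implied_id)
        return effective

    # e.g. admin implies member, member implies reader:
    # expand_implied_roles(['admin'], {'admin': ['member'],
    #                                  'member': ['reader']})
    # -> {'admin', 'member', 'reader'}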
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/088_domain_specific_roles.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/088_domain_specific_roles.py
deleted file mode 100644
index 8b792dfa..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/088_domain_specific_roles.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-
-
-_ROLE_NAME_NEW_CONSTRAINT = 'ixu_role_name_domain_id'
-_ROLE_TABLE_NAME = 'role'
-_ROLE_NAME_COLUMN_NAME = 'name'
-_DOMAIN_ID_COLUMN_NAME = 'domain_id'
-_NULL_DOMAIN_ID = '<<null>>'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- role_table = sql.Table(_ROLE_TABLE_NAME, meta, autoload=True)
- domain_id = sql.Column(_DOMAIN_ID_COLUMN_NAME, sql.String(64),
- nullable=False, server_default=_NULL_DOMAIN_ID)
-
-    # NOTE(morganfainberg): the `role_name` unique constraint is not
-    # guaranteed to have a fixed name, such as 'ixu_role_name', so we need
-    # to search for the correct constraint that only affects
-    # role_table.c.name and drop that constraint.
- to_drop = None
- if migrate_engine.name == 'mysql':
- for c in role_table.indexes:
- if (c.unique and len(c.columns) == 1 and
- _ROLE_NAME_COLUMN_NAME in c.columns):
- to_drop = c
- break
- else:
- for c in role_table.constraints:
- if len(c.columns) == 1 and _ROLE_NAME_COLUMN_NAME in c.columns:
- to_drop = c
- break
-
- if to_drop is not None:
- migrate.UniqueConstraint(role_table.c.name,
- name=to_drop.name).drop()
-
-    # Perform changes after the constraint is dropped.
- if 'domain_id' not in role_table.columns:
- # Only create the column if it doesn't already exist.
- role_table.create_column(domain_id)
-
- migrate.UniqueConstraint(role_table.c.name,
- role_table.c.domain_id,
- name=_ROLE_NAME_NEW_CONSTRAINT).create()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/089_add_root_of_all_domains.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/089_add_root_of_all_domains.py
deleted file mode 100644
index 477c719a..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/089_add_root_of_all_domains.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-_PROJECT_TABLE_NAME = 'project'
-_DOMAIN_TABLE_NAME = 'domain'
-NULL_DOMAIN_ID = '<<keystone.domain.root>>'
-
-
-def upgrade(migrate_engine):
-
- def _generate_root_domain_project():
-        # Generate a project that will act as a root for all domains, in
-        # order for us to be able to use a FK constraint on domain_id.
-        # Projects acting as a domain will not reference this as their
-        # parent_id, just as their domain_id.
- #
- # This special project is filtered out by the driver, so is never
- # visible to the manager or API.
-
- project_ref = {
- 'id': NULL_DOMAIN_ID,
- 'name': NULL_DOMAIN_ID,
- 'enabled': False,
- 'description': '',
- 'domain_id': NULL_DOMAIN_ID,
- 'is_domain': True,
- 'parent_id': None,
- 'extra': '{}'
- }
- return project_ref
-
- def _generate_root_domain():
-        # Generate a similar root for the domain table. This is an interim
-        # step that allows the current project domain_id FK to keep working.
- #
- # This special domain is filtered out by the driver, so is never
- # visible to the manager or API.
-
- domain_ref = {
- 'id': NULL_DOMAIN_ID,
- 'name': NULL_DOMAIN_ID,
- 'enabled': False,
- 'extra': '{}'
- }
- return domain_ref
-
- meta = sql.MetaData()
- meta.bind = migrate_engine
- session = sql.orm.sessionmaker(bind=migrate_engine)()
-
- project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
- domain_table = sql.Table(_DOMAIN_TABLE_NAME, meta, autoload=True)
-
- root_domain = _generate_root_domain()
- new_entry = domain_table.insert().values(**root_domain)
- session.execute(new_entry)
- session.commit()
-
- root_domain_project = _generate_root_domain_project()
- new_entry = project_table.insert().values(**root_domain_project)
- session.execute(new_entry)
- session.commit()
-
- session.close()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/090_add_local_user_and_password_tables.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/090_add_local_user_and_password_tables.py
deleted file mode 100644
index 800ba47e..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/090_add_local_user_and_password_tables.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- user = sql.Table('user', meta, autoload=True)
-
- local_user = sql.Table(
- 'local_user',
- meta,
- sql.Column('id', sql.Integer, primary_key=True, nullable=False),
- sql.Column('user_id', sql.String(64),
- sql.ForeignKey(user.c.id, ondelete='CASCADE'),
- nullable=False, unique=True),
- sql.Column('domain_id', sql.String(64), nullable=False),
- sql.Column('name', sql.String(255), nullable=False),
- sql.UniqueConstraint('domain_id', 'name'))
- local_user.create(migrate_engine, checkfirst=True)
-
- password = sql.Table(
- 'password',
- meta,
- sql.Column('id', sql.Integer, primary_key=True, nullable=False),
- sql.Column('local_user_id', sql.Integer,
- sql.ForeignKey(local_user.c.id, ondelete='CASCADE'),
- nullable=False),
- sql.Column('password', sql.String(128), nullable=False))
- password.create(migrate_engine, checkfirst=True)
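After this migration a user's name lives in local_user and the hash in password, joined through local_user_id. A hedged sketch of reading a hash back through the new layout (bound-metadata style to match the migrations here; migrate_engine and the user name are assumptions):

    import sqlalchemy as sql

    meta = sql.MetaData(bind=migrate_engine)
    local_user = sql.Table('local_user', meta, autoload=True)
    password = sql.Table('password', meta, autoload=True)

    # Join local_user to password on the new FK and filter by name.
    query = sql.select([password.c.password]).select_from(
        local_user.join(password,
                        local_user.c.id == password.c.local_user_id)
    ).where(local_user.c.name == 'some-user')
    row = query.execute().fetchone()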
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/091_migrate_data_to_local_user_and_password_tables.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/091_migrate_data_to_local_user_and_password_tables.py
deleted file mode 100644
index 1f41fd89..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/091_migrate_data_to_local_user_and_password_tables.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-from sqlalchemy import func
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- user_table = sql.Table('user', meta, autoload=True)
- local_user_table = sql.Table('local_user', meta, autoload=True)
- password_table = sql.Table('password', meta, autoload=True)
-
- # migrate data to local_user table
- local_user_values = []
- for row in user_table.select().execute():
-        # Skip any row that already exists in `local_user`; this can
-        # happen if we run into a partially migrated table due to
-        # bug #1549705.
- filter_by = local_user_table.c.user_id == row['id']
- user_count = sql.select([func.count()]).select_from(
- local_user_table).where(filter_by).execute().fetchone()[0]
- if user_count == 0:
- local_user_values.append({'user_id': row['id'],
- 'domain_id': row['domain_id'],
- 'name': row['name']})
- if local_user_values:
- local_user_table.insert().values(local_user_values).execute()
-
- # migrate data to password table
- sel = (
- sql.select([user_table, local_user_table], use_labels=True)
- .select_from(user_table.join(local_user_table, user_table.c.id ==
- local_user_table.c.user_id))
- )
- user_rows = sel.execute()
- password_values = []
- for row in user_rows:
- if row['user_password']:
- password_values.append({'local_user_id': row['local_user_id'],
- 'password': row['user_password']})
- if password_values:
- password_table.insert().values(password_values).execute()
-
- # remove domain_id and name unique constraint
- if migrate_engine.name != 'sqlite':
- migrate.UniqueConstraint(user_table.c.domain_id,
- user_table.c.name,
- name='ixu_user_name_domain_id').drop()
-
- # drop user columns
- user_table.c.domain_id.drop()
- user_table.c.name.drop()
- user_table.c.password.drop()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/092_make_implied_roles_fks_cascaded.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/092_make_implied_roles_fks_cascaded.py
deleted file mode 100644
index 5e841899..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/092_make_implied_roles_fks_cascaded.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import migrate
-import sqlalchemy as sql
-
-
-ROLE_TABLE = 'role'
-IMPLIED_ROLE_TABLE = 'implied_role'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- role = sql.Table(ROLE_TABLE, meta, autoload=True)
- implied_role = sql.Table(IMPLIED_ROLE_TABLE, meta, autoload=True)
-
- fkeys = [
- {'columns': [implied_role.c.prior_role_id],
- 'references': [role.c.id]},
- {'columns': [implied_role.c.implied_role_id],
- 'references': [role.c.id]},
- ]
-
-    # NOTE(stevemar): We need to divide these into two separate loops;
-    # otherwise they may clobber each other and we end up with only one
-    # foreign key.
- for fkey in fkeys:
- migrate.ForeignKeyConstraint(columns=fkey['columns'],
- refcolumns=fkey['references'],
- name=fkey.get('name')).drop()
- for fkey in fkeys:
- migrate.ForeignKeyConstraint(columns=fkey['columns'],
- refcolumns=fkey['references'],
- name=fkey.get('name'),
- ondelete="CASCADE").create()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/093_migrate_domains_to_projects.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/093_migrate_domains_to_projects.py
deleted file mode 100644
index f6bba7d9..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/093_migrate_domains_to_projects.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-
-import sqlalchemy as sql
-
-from keystone.common.sql import migration_helpers
-
-
-_PROJECT_TABLE_NAME = 'project'
-_DOMAIN_TABLE_NAME = 'domain'
-_PARENT_ID_COLUMN_NAME = 'parent_id'
-_DOMAIN_ID_COLUMN_NAME = 'domain_id'
-
-# Above the driver level, the domain_id of a project acting as a domain is
-# None. However, in order to enable sql integrity constraints to still operate
-# on this column, we create a special "root of all domains" row, with an ID of
-# NULL_DOMAIN_ID, which all projects acting as a domain reference in their
-# domain_id attribute. Neither this special row nor NULL_DOMAIN_ID is ever
-# exposed outside of the sql driver layer.
-NULL_DOMAIN_ID = '<<keystone.domain.root>>'
-
-
-def list_existing_project_constraints(project_table, domain_table):
- constraints = [{'table': project_table,
- 'fk_column': _PARENT_ID_COLUMN_NAME,
- 'ref_column': project_table.c.id},
- {'table': project_table,
- 'fk_column': _DOMAIN_ID_COLUMN_NAME,
- 'ref_column': domain_table.c.id}]
-
- return constraints
-
-
-def list_new_project_constraints(project_table):
- constraints = [{'table': project_table,
- 'fk_column': _PARENT_ID_COLUMN_NAME,
- 'ref_column': project_table.c.id},
- {'table': project_table,
- 'fk_column': _DOMAIN_ID_COLUMN_NAME,
- 'ref_column': project_table.c.id}]
-
- return constraints
-
-
-def upgrade(migrate_engine):
-
- def _project_from_domain(domain):
- # Creates a project dict with is_domain=True from the provided
- # domain.
-
- description = None
- extra = {}
- if domain.extra is not None:
- # 'description' property is an extra attribute in domains but a
- # first class attribute in projects
- extra = json.loads(domain.extra)
- description = extra.pop('description', None)
-
- return {
- 'id': domain.id,
- 'name': domain.name,
- 'enabled': domain.enabled,
- 'description': description,
- 'domain_id': NULL_DOMAIN_ID,
- 'is_domain': True,
- 'parent_id': None,
- 'extra': json.dumps(extra)
- }
-
- meta = sql.MetaData()
- meta.bind = migrate_engine
- session = sql.orm.sessionmaker(bind=migrate_engine)()
-
- project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
- domain_table = sql.Table(_DOMAIN_TABLE_NAME, meta, autoload=True)
-
-    # NOTE(htruta): Remove the parent_id constraint during the migration,
-    # because for every root project inside a domain we will set the
-    # project's parent_id to its domain_id. We re-enable the constraint
-    # at the end of this method. We also remove the domain_id constraint,
-    # which is recreated at the end as a FK to the project id.
- migration_helpers.remove_constraints(
- list_existing_project_constraints(project_table, domain_table))
-
- # For each domain, create a project acting as a domain. We ignore the
- # "root of all domains" row, since we already have one of these in the
- # project table.
- domains = list(domain_table.select().execute())
- for domain in domains:
- if domain.id == NULL_DOMAIN_ID:
- continue
- is_domain_project = _project_from_domain(domain)
- new_entry = project_table.insert().values(**is_domain_project)
- session.execute(new_entry)
- session.commit()
-
-    # For each project that has no parent (i.e. a top level project), update
-    # its parent_id to point at the project acting as its domain. We ignore
-    # the "root of all domains" row, since its parent_id must always be None.
- projects = list(project_table.select().execute())
- for project in projects:
- if (project.parent_id is not None or project.is_domain or
- project.id == NULL_DOMAIN_ID):
- continue
- values = {'parent_id': project.domain_id}
- update = project_table.update().where(
- project_table.c.id == project.id).values(values)
- session.execute(update)
- session.commit()
-
- migration_helpers.add_constraints(
- list_new_project_constraints(project_table))
-
- session.close()
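To make the net effect concrete: for a pre-existing domain d1 containing a top-level project p1, the rows change roughly as follows (values illustrative, not from the source):

    # domain d1 gains a twin row in `project` acting as a domain:
    #   {'id': 'd1', 'is_domain': True, 'parent_id': None,
    #    'domain_id': NULL_DOMAIN_ID, ...}
    # top-level project p1 is re-parented under that row:
    #   before: {'id': 'p1', 'domain_id': 'd1', 'parent_id': None}
    #   after:  {'id': 'p1', 'domain_id': 'd1', 'parent_id': 'd1'}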
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/094_add_federated_user_table.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/094_add_federated_user_table.py
deleted file mode 100644
index 6fd3f051..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/094_add_federated_user_table.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- user_table = sql.Table('user', meta, autoload=True)
- idp_table = sql.Table('identity_provider', meta, autoload=True)
- protocol_table = sql.Table('federation_protocol', meta, autoload=True)
-
- federated_table = sql.Table(
- 'federated_user',
- meta,
- sql.Column('id', sql.Integer, primary_key=True, nullable=False),
- sql.Column('user_id', sql.String(64),
- sql.ForeignKey(user_table.c.id, ondelete='CASCADE'),
- nullable=False),
- sql.Column('idp_id', sql.String(64),
- sql.ForeignKey(idp_table.c.id, ondelete='CASCADE'),
- nullable=False),
- sql.Column('protocol_id', sql.String(64), nullable=False),
- sql.Column('unique_id', sql.String(255), nullable=False),
- sql.Column('display_name', sql.String(255), nullable=True),
- sql.UniqueConstraint('idp_id', 'protocol_id', 'unique_id'))
- federated_table.create(migrate_engine, checkfirst=True)
-
- migrate.ForeignKeyConstraint(
- columns=[federated_table.c.protocol_id, federated_table.c.idp_id],
- refcolumns=[protocol_table.c.id, protocol_table.c.idp_id]).create()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_table.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_table.py
deleted file mode 100644
index 7a75f7b1..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_table.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
-    # You can specify primary keys when creating tables, but adding an
-    # auto-increment integer primary key to an existing table is not
-    # supported across all engines. Thus, the approach is to:
-    # (1) create a new revocation_event table with an int pkey,
-    # (2) migrate data from the old table to the new table,
-    # (3) delete the old revocation_event table, and
-    # (4) rename the new revocation_event table.
- revocation_table = sql.Table('revocation_event', meta, autoload=True)
-
- revocation_table_new = sql.Table(
- 'revocation_event_new',
- meta,
- sql.Column('id', sql.Integer, primary_key=True),
- sql.Column('domain_id', sql.String(64)),
- sql.Column('project_id', sql.String(64)),
- sql.Column('user_id', sql.String(64)),
- sql.Column('role_id', sql.String(64)),
- sql.Column('trust_id', sql.String(64)),
- sql.Column('consumer_id', sql.String(64)),
- sql.Column('access_token_id', sql.String(64)),
- sql.Column('issued_before', sql.DateTime(), nullable=False),
- sql.Column('expires_at', sql.DateTime()),
- sql.Column('revoked_at', sql.DateTime(), index=True, nullable=False),
- sql.Column('audit_id', sql.String(32), nullable=True),
- sql.Column('audit_chain_id', sql.String(32), nullable=True))
- revocation_table_new.create(migrate_engine, checkfirst=True)
-
-    # The INSERT ... SELECT statement must actually be executed for the
-    # data to be copied; building it alone does nothing.
-    copy_data = revocation_table_new.insert().from_select(
-        ['domain_id',
-         'project_id',
-         'user_id',
-         'role_id',
-         'trust_id',
-         'consumer_id',
-         'access_token_id',
-         'issued_before',
-         'expires_at',
-         'revoked_at',
-         'audit_id',
-         'audit_chain_id'],
-        revocation_table.select())
-    migrate_engine.execute(copy_data)
-
- revocation_table.drop()
- revocation_table_new.rename('revocation_event')
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/096_drop_role_name_constraint.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/096_drop_role_name_constraint.py
deleted file mode 100644
index 0156de21..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/096_drop_role_name_constraint.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-
-_ROLE_TABLE_NAME = 'role'
-_ROLE_NAME_COLUMN_NAME = 'name'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- role_table = sql.Table(_ROLE_TABLE_NAME, meta, autoload=True)
-
- # NOTE(morganfainberg): the `role_name` unique constraint is not
- # guaranteed to be named 'ixu_role_name', so we need to search for the
- # correct constraint that only affects role_table.c.name and drop
- # that constraint.
- #
-    # This is an idempotent change that reflects the fix to migration
-    # 88, for the case where the role_name unique constraint was not named
-    # consistently and someone manually fixed the migrations / db without
-    # dropping the old constraint.
- to_drop = None
- if migrate_engine.name == 'mysql':
- for c in role_table.indexes:
- if (c.unique and len(c.columns) == 1 and
- _ROLE_NAME_COLUMN_NAME in c.columns):
- to_drop = c
- break
- else:
- for c in role_table.constraints:
- if len(c.columns) == 1 and _ROLE_NAME_COLUMN_NAME in c.columns:
- to_drop = c
- break
-
- if to_drop is not None:
- migrate.UniqueConstraint(role_table.c.name,
- name=to_drop.name).drop()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/__init__.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/__init__.py
+++ /dev/null
diff --git a/keystone-moon/keystone/common/sql/migration_helpers.py b/keystone-moon/keystone/common/sql/migration_helpers.py
deleted file mode 100644
index 40c1fbb5..00000000
--- a/keystone-moon/keystone/common/sql/migration_helpers.py
+++ /dev/null
@@ -1,245 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# Copyright 2013 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-import sys
-
-import migrate
-from migrate import exceptions
-from oslo_config import cfg
-from oslo_db.sqlalchemy import migration
-from oslo_utils import importutils
-import six
-import sqlalchemy
-
-from keystone.common import sql
-from keystone import contrib
-from keystone import exception
-from keystone.i18n import _
-
-
-CONF = cfg.CONF
-DEFAULT_EXTENSIONS = []
-
-MIGRATED_EXTENSIONS = ['endpoint_policy',
- 'federation',
- 'oauth1',
- 'revoke',
- 'endpoint_filter'
- ]
-
-
-# Different RDBMSs use different schemes for naming foreign key
-# constraints. SQLAlchemy does not query the database for the constraint
-# name; it attempts to deduce it from the column instead, which
-# fails on MySQL.
-def get_constraints_names(table, column_name):
- fkeys = [fk.name for fk in table.constraints
- if (isinstance(fk, sqlalchemy.ForeignKeyConstraint) and
- column_name in fk.columns)]
- return fkeys
-
-
-# remove_constraints and add_constraints both accept a list of dictionaries
-# of the form:
-# {'table': a sqlalchemy table. The constraint is added to or dropped
-#     from this table.
-#  'fk_column': the name of a column on the above table. The constraint
-#     is added to or dropped from this column.
-#  'ref_column': a sqlalchemy column object. This is the reference column
-#     for the constraint.}
-def remove_constraints(constraints):
- for constraint_def in constraints:
- constraint_names = get_constraints_names(constraint_def['table'],
- constraint_def['fk_column'])
- for constraint_name in constraint_names:
- migrate.ForeignKeyConstraint(
- columns=[getattr(constraint_def['table'].c,
- constraint_def['fk_column'])],
- refcolumns=[constraint_def['ref_column']],
- name=constraint_name).drop()
-
-
-def add_constraints(constraints):
- for constraint_def in constraints:
-
- if constraint_def['table'].kwargs.get('mysql_engine') == 'MyISAM':
- # Don't try to create constraint when using MyISAM because it's
- # not supported.
- continue
-
- ref_col = constraint_def['ref_column']
- ref_engine = ref_col.table.kwargs.get('mysql_engine')
- if ref_engine == 'MyISAM':
- # Don't try to create constraint when using MyISAM because it's
- # not supported.
- continue
-
- migrate.ForeignKeyConstraint(
- columns=[getattr(constraint_def['table'].c,
- constraint_def['fk_column'])],
- refcolumns=[constraint_def['ref_column']]).create()
-
-
-def rename_tables_with_constraints(renames, constraints, engine):
- """Renames tables with foreign key constraints.
-
- Tables are renamed after first removing constraints. The constraints are
- replaced after the rename is complete.
-
- This works on databases that don't support renaming tables that have
- constraints on them (DB2).
-
- `renames` is a dict, mapping {'to_table_name': from_table, ...}
- """
-    if engine.name != 'sqlite':
-        # SQLite can't drop constraints via ALTER TABLE, so there is
-        # nothing to remove.
- remove_constraints(constraints)
-
- for to_table_name in renames:
- from_table = renames[to_table_name]
- from_table.rename(to_table_name)
-
-    if engine.name != 'sqlite':
- add_constraints(constraints)
-
-
-def find_migrate_repo(package=None, repo_name='migrate_repo'):
- package = package or sql
- path = os.path.abspath(os.path.join(
- os.path.dirname(package.__file__), repo_name))
- if os.path.isdir(path):
- return path
- raise exception.MigrationNotProvided(package.__name__, path)
-
-
-def _sync_common_repo(version):
- abs_path = find_migrate_repo()
- init_version = get_init_version()
- with sql.session_for_write() as session:
- engine = session.get_bind()
- _assert_not_schema_downgrade(version=version)
- migration.db_sync(engine, abs_path, version=version,
- init_version=init_version, sanity_check=False)
-
-
-def get_init_version(abs_path=None):
- """Get the initial version of a migrate repository
-
- :param abs_path: Absolute path to migrate repository.
- :return: initial version number or None, if DB is empty.
- """
- if abs_path is None:
- abs_path = find_migrate_repo()
-
- repo = migrate.versioning.repository.Repository(abs_path)
-
- # Sadly, Repository has a `latest` but not an `oldest`.
- # The value is a VerNum object which needs to be converted into an int.
- oldest = int(min(repo.versions.versions))
-
- if oldest < 1:
- return None
-
- # The initial version is one less
- return oldest - 1
-
-
-def _assert_not_schema_downgrade(extension=None, version=None):
- if version is not None:
- try:
- current_ver = int(six.text_type(get_db_version(extension)))
- if int(version) < current_ver:
- raise migration.exception.DbMigrationError(
- _("Unable to downgrade schema"))
- except exceptions.DatabaseNotControlledError: # nosec
- # NOTE(morganfainberg): The database is not controlled, this action
- # cannot be a downgrade.
- pass
-
-
-def _sync_extension_repo(extension, version):
- if extension in MIGRATED_EXTENSIONS:
- raise exception.MigrationMovedFailure(extension=extension)
-
- with sql.session_for_write() as session:
- engine = session.get_bind()
-
- try:
- package_name = '.'.join((contrib.__name__, extension))
- package = importutils.import_module(package_name)
- except ImportError:
- raise ImportError(_("%s extension does not exist.")
- % package_name)
- try:
- abs_path = find_migrate_repo(package)
- try:
- migration.db_version_control(engine, abs_path)
- # Register the repo with the version control API
- # If it already knows about the repo, it will throw
- # an exception that we can safely ignore
- except exceptions.DatabaseAlreadyControlledError: # nosec
- pass
- except exception.MigrationNotProvided as e:
- print(e)
- sys.exit(1)
-
- _assert_not_schema_downgrade(extension=extension, version=version)
-
- init_version = get_init_version(abs_path=abs_path)
-
- migration.db_sync(engine, abs_path, version=version,
- init_version=init_version, sanity_check=False)
-
-
-def sync_database_to_version(extension=None, version=None):
- if not extension:
- _sync_common_repo(version)
- # If version is greater than 0, it is for the common
- # repository only, and only that will be synchronized.
- if version is None:
- for default_extension in DEFAULT_EXTENSIONS:
- _sync_extension_repo(default_extension, version)
- else:
- _sync_extension_repo(extension, version)
-
-
-def get_db_version(extension=None):
- if not extension:
- with sql.session_for_write() as session:
- return migration.db_version(session.get_bind(),
- find_migrate_repo(),
- get_init_version())
-
- try:
- package_name = '.'.join((contrib.__name__, extension))
- package = importutils.import_module(package_name)
- except ImportError:
- raise ImportError(_("%s extension does not exist.")
- % package_name)
-
- with sql.session_for_write() as session:
- return migration.db_version(
- session.get_bind(), find_migrate_repo(package), 0)
-
-
-def print_db_version(extension=None):
- try:
- db_version = get_db_version(extension=extension)
- print(db_version)
- except exception.MigrationNotProvided as e:
- print(e)
- sys.exit(1)
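For reference, the constraint dictionaries consumed by remove_constraints and add_constraints look like this in use (project_table and domain_table assumed to be autoloaded sqlalchemy Tables, as in migration 093 above):

    constraints = [{'table': project_table,
                    'fk_column': 'domain_id',
                    'ref_column': domain_table.c.id}]
    remove_constraints(constraints)  # drops the FK, whatever its name is
    # ... perform schema/data changes ...
    add_constraints(constraints)     # recreates it (skipped for MyISAM)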
diff --git a/keystone-moon/keystone/common/tokenless_auth.py b/keystone-moon/keystone/common/tokenless_auth.py
deleted file mode 100644
index fd9c1592..00000000
--- a/keystone-moon/keystone/common/tokenless_auth.py
+++ /dev/null
@@ -1,192 +0,0 @@
-# Copyright 2015 Hewlett-Packard
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import hashlib
-
-from oslo_config import cfg
-from oslo_log import log
-
-from keystone.auth import controllers
-from keystone.common import dependency
-from keystone import exception
-from keystone.federation import constants as federation_constants
-from keystone.federation import utils
-from keystone.i18n import _
-
-
-CONF = cfg.CONF
-LOG = log.getLogger(__name__)
-
-
-@dependency.requires('assignment_api', 'federation_api',
- 'identity_api', 'resource_api')
-class TokenlessAuthHelper(object):
- def __init__(self, env):
- """A init class for TokenlessAuthHelper.
-
- :param env: The HTTP request environment that should contain
- client certificate attributes. These attributes should match
- with what the mapping defines. Or a user cannot be mapped and
- results un-authenticated. The following examples are for the
- attributes that reference to the client certificate's Subject's
- Common Name and Organization:
- SSL_CLIENT_S_DN_CN, SSL_CLIENT_S_DN_O
- :type env: dict
- """
- self.env = env
-
- def _build_scope_info(self):
- """Build the token request scope based on the headers.
-
- :returns: scope data
- :rtype: dict
- """
- project_id = self.env.get('HTTP_X_PROJECT_ID')
- project_name = self.env.get('HTTP_X_PROJECT_NAME')
- project_domain_id = self.env.get('HTTP_X_PROJECT_DOMAIN_ID')
- project_domain_name = self.env.get('HTTP_X_PROJECT_DOMAIN_NAME')
- domain_id = self.env.get('HTTP_X_DOMAIN_ID')
- domain_name = self.env.get('HTTP_X_DOMAIN_NAME')
-
- scope = {}
- if project_id:
- scope['project'] = {'id': project_id}
- elif project_name:
- scope['project'] = {'name': project_name}
- if project_domain_id:
- scope['project']['domain'] = {'id': project_domain_id}
- elif project_domain_name:
- scope['project']['domain'] = {'name': project_domain_name}
- else:
- msg = _('Neither Project Domain ID nor Project Domain Name '
- 'was provided.')
- raise exception.ValidationError(msg)
- elif domain_id:
- scope['domain'] = {'id': domain_id}
- elif domain_name:
- scope['domain'] = {'name': domain_name}
- else:
- raise exception.ValidationError(
- attribute='project or domain',
- target='scope')
- return scope
-
- def get_scope(self):
- auth = {}
- # NOTE(chioleong): Auth methods here are insignificant because
- # we only care about using auth.controllers.AuthInfo
- # to validate the scope information. Therefore,
- # we don't provide any identity.
- auth['scope'] = self._build_scope_info()
-
- # NOTE(chioleong): We'll let AuthInfo validate the scope for us
- auth_info = controllers.AuthInfo.create({}, auth, scope_only=True)
- return auth_info.get_scope()
-
- def get_mapped_user(self, project_id=None, domain_id=None):
- """Map client certificate to an existing user.
-
-        If the user is ephemeral, there is no validation of the user itself;
-        the user is instead mapped to the corresponding group(s), and the
-        scope of this ephemeral user is whatever is assigned to those groups.
-
- :param project_id: Project scope of the mapped user.
- :param domain_id: Domain scope of the mapped user.
-        :returns: A dictionary containing keys such as
-            user_id, user_name, domain_id and domain_name
- :rtype: dict
- """
- idp_id = self._build_idp_id()
- LOG.debug('The IdP Id %s and protocol Id %s are used to look up '
- 'the mapping.', idp_id, CONF.tokenless_auth.protocol)
-
- mapped_properties, mapping_id = self.federation_api.evaluate(
- idp_id, CONF.tokenless_auth.protocol, self.env)
-
- user = mapped_properties.get('user', {})
- user_id = user.get('id')
- user_name = user.get('name')
- user_type = user.get('type')
- if user.get('domain') is not None:
- user_domain_id = user.get('domain').get('id')
- user_domain_name = user.get('domain').get('name')
- else:
- user_domain_id = None
- user_domain_name = None
-
- # if user is ephemeral type, we don't care if the user exists
- # or not, but just care if the mapped group(s) is valid.
- if user_type == utils.UserType.EPHEMERAL:
- user_ref = {'type': utils.UserType.EPHEMERAL}
- group_ids = mapped_properties['group_ids']
- utils.validate_groups_in_backend(group_ids,
- mapping_id,
- self.identity_api)
- group_ids.extend(
- utils.transform_to_group_ids(
- mapped_properties['group_names'], mapping_id,
- self.identity_api, self.assignment_api))
- roles = self.assignment_api.get_roles_for_groups(group_ids,
- project_id,
- domain_id)
- if roles is not None:
- role_names = [role['name'] for role in roles]
- user_ref['roles'] = role_names
- user_ref['group_ids'] = list(group_ids)
- user_ref[federation_constants.IDENTITY_PROVIDER] = idp_id
- user_ref[federation_constants.PROTOCOL] = (
- CONF.tokenless_auth.protocol)
- return user_ref
-
- if user_id:
- user_ref = self.identity_api.get_user(user_id)
- elif user_name and (user_domain_name or user_domain_id):
- if user_domain_name:
- user_domain = self.resource_api.get_domain_by_name(
- user_domain_name)
- self.resource_api.assert_domain_enabled(user_domain['id'],
- user_domain)
- user_domain_id = user_domain['id']
- user_ref = self.identity_api.get_user_by_name(user_name,
- user_domain_id)
- else:
-            msg = _('User auth cannot be built: either a user id, a '
-                    'user name with a domain id, or a user name with a '
-                    'domain name must be provided.')
- raise exception.ValidationError(msg)
- self.identity_api.assert_user_enabled(
- user_id=user_ref['id'],
- user=user_ref)
- user_ref['type'] = utils.UserType.LOCAL
- return user_ref
-
- def _build_idp_id(self):
- """Build the IdP name from the given config option issuer_attribute.
-
- The default issuer attribute SSL_CLIENT_I_DN in the environment is
- built with the following formula -
-
- base64_idp = sha1(env['SSL_CLIENT_I_DN'])
-
- :returns: base64_idp like the above example
- :rtype: str
- """
- idp = self.env.get(CONF.tokenless_auth.issuer_attribute)
- if idp is None:
- raise exception.TokenlessAuthConfigError(
- issuer_attribute=CONF.tokenless_auth.issuer_attribute)
-
- hashed_idp = hashlib.sha256(idp.encode('utf-8'))
- return hashed_idp.hexdigest()
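A minimal usage sketch, assuming a configured federation mapping and backends; the environ values are fabricated, and the header names come from _build_scope_info above:

    env = {
        'SSL_CLIENT_I_DN': 'CN=ca,O=Example',   # default issuer_attribute
        'SSL_CLIENT_S_DN_CN': 'alice',          # consumed by the mapping
        'HTTP_X_PROJECT_NAME': 'demo',
        'HTTP_X_PROJECT_DOMAIN_NAME': 'Default',
    }
    helper = TokenlessAuthHelper(env)
    scope = helper.get_scope()
    user_ref = helper.get_mapped_user(project_id='some-project-id')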
diff --git a/keystone-moon/keystone/common/utils.py b/keystone-moon/keystone/common/utils.py
deleted file mode 100644
index 5438ad43..00000000
--- a/keystone-moon/keystone/common/utils.py
+++ /dev/null
@@ -1,598 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# Copyright 2011 - 2012 Justin Santa Barbara
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import calendar
-import collections
-import grp
-import hashlib
-import os
-import pwd
-import uuid
-
-from oslo_config import cfg
-from oslo_log import log
-from oslo_serialization import jsonutils
-from oslo_utils import reflection
-from oslo_utils import strutils
-from oslo_utils import timeutils
-import passlib.hash
-import six
-from six import moves
-
-from keystone.common import authorization
-from keystone import exception
-from keystone.i18n import _, _LE, _LW
-
-
-CONF = cfg.CONF
-
-LOG = log.getLogger(__name__)
-
-
-# NOTE(stevermar): This UUID must stay the same, forever, across
-# all of keystone to preserve its value as a URN namespace, which is
-# used for ID transformation.
-RESOURCE_ID_NAMESPACE = uuid.UUID('4332ecab-770b-4288-a680-b9aca3b1b153')
-
-
-def resource_uuid(value):
- """Converts input to valid UUID hex digits."""
- try:
- uuid.UUID(value)
- return value
- except ValueError:
- if len(value) <= 64:
- if six.PY2 and isinstance(value, six.text_type):
- value = value.encode('utf-8')
- return uuid.uuid5(RESOURCE_ID_NAMESPACE, value).hex
-        raise ValueError(_('Length of transformable resource id > 64, '
-                           'which is the maximum number of allowed '
-                           'characters'))
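Because uuid5 is name-based, the transformation is deterministic: a non-UUID id (an LDAP DN, say) always maps to the same 32-hex-digit value. A quick check:

    fixed = resource_uuid('cn=alice,ou=users,dc=example,dc=com')
    assert fixed == resource_uuid('cn=alice,ou=users,dc=example,dc=com')
    assert len(fixed) == 32  # uuid5 hex digest in RESOURCE_ID_NAMESPACE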
-
-
-def flatten_dict(d, parent_key=''):
- """Flatten a nested dictionary
-
- Converts a dictionary with nested values to a single level flat
- dictionary, with dotted notation for each key.
-
- """
- items = []
- for k, v in d.items():
- new_key = parent_key + '.' + k if parent_key else k
- if isinstance(v, collections.MutableMapping):
- items.extend(list(flatten_dict(v, new_key).items()))
- else:
- items.append((new_key, v))
- return dict(items)
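For example:

    flat = flatten_dict({'user': {'domain': {'id': 'default'},
                                  'name': 'alice'}})
    assert flat == {'user.domain.id': 'default', 'user.name': 'alice'}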
-
-
-def read_cached_file(filename, cache_info, reload_func=None):
- """Read from a file if it has been modified.
-
- :param cache_info: dictionary to hold opaque cache.
- :param reload_func: optional function to be called with data when
- file is reloaded due to a modification.
-
- :returns: data from file.
-
- """
- mtime = os.path.getmtime(filename)
- if not cache_info or mtime != cache_info.get('mtime'):
- with open(filename) as fap:
- cache_info['data'] = fap.read()
- cache_info['mtime'] = mtime
- if reload_func:
- reload_func(cache_info['data'])
- return cache_info['data']
-
-
-class SmarterEncoder(jsonutils.json.JSONEncoder):
- """Help for JSON encoding dict-like objects."""
-
- def default(self, obj):
- if not isinstance(obj, dict) and hasattr(obj, 'iteritems'):
- return dict(obj.iteritems())
- return super(SmarterEncoder, self).default(obj)
-
-
-class PKIEncoder(SmarterEncoder):
- """Special encoder to make token JSON a bit shorter."""
-
- item_separator = ','
- key_separator = ':'
-
-
-def verify_length_and_trunc_password(password):
- """Verify and truncate the provided password to the max_password_length."""
- max_length = CONF.identity.max_password_length
- try:
- if len(password) > max_length:
- if CONF.strict_password_check:
- raise exception.PasswordVerificationError(size=max_length)
- else:
- LOG.warning(
- _LW('Truncating user password to '
- '%d characters.'), max_length)
- return password[:max_length]
- else:
- return password
- except TypeError:
- raise exception.ValidationError(attribute='string', target='password')
-
-
-def hash_access_key(access):
- hash_ = hashlib.sha256()
- if not isinstance(access, six.binary_type):
- access = access.encode('utf-8')
- hash_.update(access)
- return hash_.hexdigest()
-
-
-def hash_user_password(user):
- """Hash a user dict's password without modifying the passed-in dict."""
- password = user.get('password')
- if password is None:
- return user
-
- return dict(user, password=hash_password(password))
-
-
-def hash_password(password):
- """Hash a password. Hard."""
- password_utf8 = verify_length_and_trunc_password(password).encode('utf-8')
- return passlib.hash.sha512_crypt.encrypt(
- password_utf8, rounds=CONF.crypt_strength)
-
-
-def check_password(password, hashed):
- """Check that a plaintext password matches hashed.
-
- hashpw returns the salt value concatenated with the actual hash value.
- It extracts the actual salt if this value is then passed as the salt.
-
- """
- if password is None or hashed is None:
- return False
- password_utf8 = verify_length_and_trunc_password(password).encode('utf-8')
- return passlib.hash.sha512_crypt.verify(password_utf8, hashed)
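A round-trip sketch of the two helpers above (assuming default CONF values for crypt_strength and max_password_length):

    hashed = hash_password('correct horse battery staple')
    assert check_password('correct horse battery staple', hashed)
    assert not check_password('wrong password', hashed)
    assert not check_password(None, hashed)  # missing input never matches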
-
-
-def attr_as_boolean(val_attr):
- """Returns the boolean value, decoded from a string.
-
- We test explicitly for a value meaning False, which can be one of
- several formats as specified in oslo strutils.FALSE_STRINGS.
- All other string values (including an empty string) are treated as
- meaning True.
-
- """
- return strutils.bool_from_string(val_attr, default=True)
-
-
-def get_blob_from_credential(credential):
- try:
- blob = jsonutils.loads(credential.blob)
- except (ValueError, TypeError):
- raise exception.ValidationError(
- message=_('Invalid blob in credential'))
- if not blob or not isinstance(blob, dict):
- raise exception.ValidationError(attribute='blob',
- target='credential')
- return blob
-
-
-def convert_ec2_to_v3_credential(ec2credential):
- blob = {'access': ec2credential.access,
- 'secret': ec2credential.secret}
- return {'id': hash_access_key(ec2credential.access),
- 'user_id': ec2credential.user_id,
- 'project_id': ec2credential.tenant_id,
- 'blob': jsonutils.dumps(blob),
- 'type': 'ec2',
- 'extra': jsonutils.dumps({})}
-
-
-def convert_v3_to_ec2_credential(credential):
- blob = get_blob_from_credential(credential)
- return {'access': blob.get('access'),
- 'secret': blob.get('secret'),
- 'user_id': credential.user_id,
- 'tenant_id': credential.project_id,
- }
-
-
-def unixtime(dt_obj):
- """Format datetime object as unix timestamp
-
- :param dt_obj: datetime.datetime object
- :returns: float
-
- """
- return calendar.timegm(dt_obj.utctimetuple())
-
-
-def auth_str_equal(provided, known):
- """Constant-time string comparison.
-
-    :param provided: the first string
-    :param known: the second string
-
- :returns: True if the strings are equal.
-
- This function takes two strings and compares them. It is intended to be
- used when doing a comparison for authentication purposes to help guard
- against timing attacks. When using the function for this purpose, always
-    provide the user-supplied value as the first argument. The time this
-    function takes is always a function of the length of that string.
- """
- result = 0
- p_len = len(provided)
- k_len = len(known)
- for i in moves.range(p_len):
- a = ord(provided[i]) if i < p_len else 0
- b = ord(known[i]) if i < k_len else 0
- result |= a ^ b
- return (p_len == k_len) & (result == 0)
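The argument order matters: the loop runs over the first argument's length, so the attacker-controlled value should always go first, e.g.:

    # user_supplied comes from the request; known_secret from storage
    # (both names are illustrative).
    if not auth_str_equal(user_supplied, known_secret):
        raise ValueError('authentication failed')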
-
-
-def setup_remote_pydev_debug():
- if CONF.pydev_debug_host and CONF.pydev_debug_port:
- try:
- try:
- from pydev import pydevd
- except ImportError:
- import pydevd
-
- pydevd.settrace(CONF.pydev_debug_host,
- port=CONF.pydev_debug_port,
- stdoutToServer=True,
- stderrToServer=True)
- return True
- except Exception:
- LOG.exception(_LE(
- 'Error setting up the debug environment. Verify that the '
- 'option --debug-url has the format <host>:<port> and that a '
-                'debugger process is listening on that port.'))
- raise
-
-
-def get_unix_user(user=None):
- """Get the uid and user name.
-
- This is a convenience utility which accepts a variety of input
- which might represent a unix user. If successful it returns the uid
- and name. Valid input is:
-
- string
- A string is first considered to be a user name and a lookup is
- attempted under that name. If no name is found then an attempt
- is made to convert the string to an integer and perform a
- lookup as a uid.
-
- int
- An integer is interpreted as a uid.
-
- None
- None is interpreted to mean use the current process's
- effective user.
-
- If the input is a valid type but no user is found a KeyError is
- raised. If the input is not a valid type a TypeError is raised.
-
- :param object user: string, int or None specifying the user to
- lookup.
-
- :returns: tuple of (uid, name)
-
- """
- if isinstance(user, six.string_types):
- try:
- user_info = pwd.getpwnam(user)
- except KeyError:
- try:
- i = int(user)
- except ValueError:
- raise KeyError("user name '%s' not found" % user)
- try:
- user_info = pwd.getpwuid(i)
- except KeyError:
- raise KeyError("user id %d not found" % i)
- elif isinstance(user, int):
- try:
- user_info = pwd.getpwuid(user)
- except KeyError:
- raise KeyError("user id %d not found" % user)
- elif user is None:
- user_info = pwd.getpwuid(os.geteuid())
- else:
- user_cls_name = reflection.get_class_name(user,
- fully_qualified=False)
- raise TypeError('user must be string, int or None; not %s (%r)' %
- (user_cls_name, user))
-
- return user_info.pw_uid, user_info.pw_name
-
-
-def get_unix_group(group=None):
- """Get the gid and group name.
-
- This is a convenience utility which accepts a variety of input
- which might represent a unix group. If successful it returns the gid
- and name. Valid input is:
-
- string
- A string is first considered to be a group name and a lookup is
- attempted under that name. If no name is found then an attempt
- is made to convert the string to an integer and perform a
- lookup as a gid.
-
- int
- An integer is interpreted as a gid.
-
- None
- None is interpreted to mean use the current process's
- effective group.
-
- If the input is a valid type but no group is found a KeyError is
- raised. If the input is not a valid type a TypeError is raised.
-
- :param object group: string, int or None specifying the group to
- lookup.
-
- :returns: tuple of (gid, name)
-
- """
- if isinstance(group, six.string_types):
- try:
- group_info = grp.getgrnam(group)
- except KeyError:
- # Was an int passed as a string?
- # Try converting to int and lookup by id instead.
- try:
- i = int(group)
- except ValueError:
- raise KeyError("group name '%s' not found" % group)
- try:
- group_info = grp.getgrgid(i)
- except KeyError:
- raise KeyError("group id %d not found" % i)
- elif isinstance(group, int):
- try:
- group_info = grp.getgrgid(group)
- except KeyError:
- raise KeyError("group id %d not found" % group)
- elif group is None:
- group_info = grp.getgrgid(os.getegid())
- else:
- group_cls_name = reflection.get_class_name(group,
- fully_qualified=False)
- raise TypeError('group must be string, int or None; not %s (%r)' %
- (group_cls_name, group))
-
- return group_info.gr_gid, group_info.gr_name
-
-
-def set_permissions(path, mode=None, user=None, group=None, log=None):
- """Set the ownership and permissions on the pathname.
-
-    Each of mode, user and group is optional; if None, that
-    aspect is not modified.
-
- Owner and group may be specified either with a symbolic name
- or numeric id.
-
- :param string path: Pathname of the file or directory to modify.
- :param object mode: permission flags (int) as used by chmod,
- if None do not set.
- :param object user: set user, name (string) or uid (integer),
- if None do not set.
- :param object group: set group, name (string) or gid (integer)
- if None do not set.
- :param logger log: logging.logger object, used to emit log messages,
- if None no logging is performed.
-
- """
- if user is None:
- user_uid, user_name = None, None
- else:
- user_uid, user_name = get_unix_user(user)
-
- if group is None:
- group_gid, group_name = None, None
- else:
- group_gid, group_name = get_unix_group(group)
-
- if log:
- if mode is None:
- mode_string = str(mode)
- else:
- mode_string = oct(mode)
- log.debug("set_permissions: "
- "path='%s' mode=%s user=%s(%s) group=%s(%s)",
- path, mode_string,
- user_name, user_uid, group_name, group_gid)
-
- # Change user and group if specified
- if user_uid is not None or group_gid is not None:
- if user_uid is None:
- user_uid = -1
- if group_gid is None:
- group_gid = -1
- try:
- os.chown(path, user_uid, group_gid)
- except OSError as exc:
- raise EnvironmentError("chown('%s', %s, %s): %s" %
- (path,
- user_name, group_name,
- exc.strerror))
-
- # Change permission flags
- if mode is not None:
- try:
- os.chmod(path, mode)
- except OSError as exc:
- raise EnvironmentError("chmod('%s', %#o): %s" %
- (path, mode, exc.strerror))
-
-
-def make_dirs(path, mode=None, user=None, group=None, log=None):
- """Assure directory exists, set ownership and permissions.
-
- Assure the directory exists and optionally set its ownership
- and permissions.
-
- Each of the mode, user and group are optional, if None then
- that aspect is not modified.
-
- Owner and group may be specified either with a symbolic name
- or numeric id.
-
- :param string path: Pathname of directory whose existence is assured.
- :param object mode: permission flags (int) as used by chmod,
- if None do not set.
- :param object user: set user, name (string) or uid (integer),
- if None do not set.
- :param object group: set group, name (string) or gid (integer)
- if None do not set.
- :param logger log: logging.logger object, used to emit log messages,
- if None no logging is performed.
-
- """
- if log:
- if mode is None:
- mode_string = str(mode)
- else:
- mode_string = oct(mode)
- log.debug("make_dirs path='%s' mode=%s user=%s group=%s",
- path, mode_string, user, group)
-
- if not os.path.exists(path):
- try:
- os.makedirs(path)
- except OSError as exc:
- raise EnvironmentError("makedirs('%s'): %s" % (path, exc.strerror))
-
- set_permissions(path, mode, user, group, log)
-
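-# A typical call site might look like this sketch (path, mode and
-# account names are illustrative):
-#
-#     make_dirs('/var/cache/keystone', mode=0o700,
-#               user='keystone', group='keystone', log=LOG)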
-
-class WhiteListedItemFilter(object):
-
- def __init__(self, whitelist, data):
- self._whitelist = set(whitelist or [])
- self._data = data
-
- def __getitem__(self, name):
- if name not in self._whitelist:
- raise KeyError
- return self._data[name]
-
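-# Illustrative behaviour:
-#
-#     f = WhiteListedItemFilter(['name'], {'name': 'x', 'secret': 'y'})
-#     f['name']    # -> 'x'
-#     f['secret']  # raises KeyError even though the key is in the data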
-
-_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
-_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
-
-
-def isotime(at=None, subsecond=False):
- """Stringify time in ISO 8601 format."""
- # Python provides a similar instance method for datetime.datetime objects
- # called isoformat(). The strings generated by isoformat() have a couple
- # of problems:
- # 1) The strings generated by isotime() are used in tokens and other
- # public APIs that we can't change without a deprecation period, and
- # the strings generated by isoformat() are not in the same format.
- # 2) isoformat() omits the microseconds when their value happens to be 0.
- # This shows up as intermittent failures: parsers written to always
- # expect microseconds parse correctly most of the time and break only
- # on whole-second values.
-
- if not at:
- at = timeutils.utcnow()
- st = at.strftime(_ISO8601_TIME_FORMAT
- if not subsecond
- else _ISO8601_TIME_FORMAT_SUBSECOND)
- tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
- st += ('Z' if tz == 'UTC' else tz)
- return st
-
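-# For a naive (tzinfo-less) datetime the output looks like this
-# illustrative sketch:
-#
-#     dt = datetime.datetime(2016, 1, 2, 3, 4, 5, 6)
-#     isotime(dt)                  # -> '2016-01-02T03:04:05Z'
-#     isotime(dt, subsecond=True)  # -> '2016-01-02T03:04:05.000006Z'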
-
-def strtime():
- at = timeutils.utcnow()
- return at.strftime(timeutils.PERFECT_TIME_FORMAT)
-
-
-def get_token_ref(context):
- """Retrieves KeystoneToken object from the auth context and returns it.
-
- :param dict context: The request context.
- :raises keystone.exception.Unauthorized: If auth context cannot be found.
- :returns: The KeystoneToken object.
- """
- try:
- # Retrieve the auth context that was prepared by AuthContextMiddleware.
- auth_context = (context['environment']
- [authorization.AUTH_CONTEXT_ENV])
- return auth_context['token']
- except KeyError:
- LOG.warning(_LW("Couldn't find the auth context."))
- raise exception.Unauthorized()
-
-
-URL_RESERVED_CHARS = ":/?#[]@!$&'()*+,;="
-
-
-def is_not_url_safe(name):
- """Check if a string contains any url reserved characters."""
- return len(list_url_unsafe_chars(name)) > 0
-
-
-def list_url_unsafe_chars(name):
- """Return a list of the reserved characters."""
- reserved_chars = ''
- for i in name:
- if i in URL_RESERVED_CHARS:
- reserved_chars += i
- return reserved_chars
-
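-# For example:
-#
-#     is_not_url_safe('project-x')      # -> False ('-' is not reserved)
-#     list_url_unsafe_chars('a:b/c?d')  # -> ':/?'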
-
-def lower_case_hostname(url):
- """Change the URL's hostname to lowercase"""
- # NOTE(gyee): according to
- # https://www.w3.org/TR/WD-html40-970708/htmlweb.html, the netloc portion
- # of the URL is case-insensitive
- parsed = moves.urllib.parse.urlparse(url)
- # Note: _replace method for named tuples is public and defined in docs
- replaced = parsed._replace(netloc=parsed.netloc.lower())
- return moves.urllib.parse.urlunparse(replaced)
-
-
-def remove_standard_port(url):
- # remove the default ports specified in RFC 2616 and RFC 2818
- o = moves.urllib.parse.urlparse(url)
- (host, separator, port) = o.netloc.partition(':')
- if o.scheme.lower() == 'http' and port == '80':
- # NOTE(gyee): _replace() is not a private method. It has an
- # an underscore prefix to prevent conflict with field names.
- # See https://docs.python.org/2/library/collections.html#
- # collections.namedtuple
- o = o._replace(netloc=host)
- if o.scheme.lower() == 'https' and port == '443':
- o = o._replace(netloc=host)
-
- return moves.urllib.parse.urlunparse(o)
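-# Illustrative behaviour (URLs are examples only):
-#
-#     remove_standard_port('http://example.com:80/v3')
-#         -> 'http://example.com/v3'
-#     remove_standard_port('https://example.com:8443/v3')
-#         -> 'https://example.com:8443/v3' (non-default port kept)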
diff --git a/keystone-moon/keystone/common/validation/__init__.py b/keystone-moon/keystone/common/validation/__init__.py
deleted file mode 100644
index 9d812f40..00000000
--- a/keystone-moon/keystone/common/validation/__init__.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Request body validating middleware for OpenStack Identity resources."""
-
-import functools
-import inspect
-
-from keystone.common.validation import validators
-from keystone import exception
-from keystone.i18n import _
-
-
-def validated(request_body_schema, resource_to_validate):
- """Register a schema to validate a resource reference.
-
- Registered schema will be used for validating a request body just before
- API method execution.
-
- :param request_body_schema: a schema to validate the resource reference
- :param resource_to_validate: the reference to validate
- :raises keystone.exception.ValidationError: if `resource_to_validate` is
- None. (see wrapper method below).
- :raises TypeError: at decoration time when the expected resource to
- validate isn't found in the decorated method's
- signature
-
- """
- schema_validator = validators.SchemaValidator(request_body_schema)
-
- def add_validator(func):
- argspec = inspect.getargspec(func)
- try:
- arg_index = argspec.args.index(resource_to_validate)
- except ValueError:
- raise TypeError(_('validated expected to find %(param_name)r in '
- 'function signature for %(func_name)r.') %
- {'param_name': resource_to_validate,
- 'func_name': func.__name__})
-
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- if (resource_to_validate in kwargs and
- kwargs[resource_to_validate] is not None):
- schema_validator.validate(kwargs[resource_to_validate])
- else:
- try:
- resource = args[arg_index]
- # If the resource to be validated is not None but
- # empty, it can still be validated by jsonschema.
- if resource is not None:
- schema_validator.validate(resource)
- else:
- raise exception.ValidationError(
- attribute=resource_to_validate,
- target='request body')
- # The resource could not be found in either kwargs or args.
- except IndexError:
- raise exception.ValidationError(
- attribute=resource_to_validate,
- target='request body')
- return func(*args, **kwargs)
- return wrapper
- return add_validator
-
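-# Decorating a controller method is the typical use; the schema and
-# method names in this sketch are hypothetical:
-#
-#     @validated(entity_create_schema, 'entity')
-#     def create_entity(self, context, entity):
-#         ...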
-
-def nullable(property_schema):
- """Clone a property schema into one that is nullable.
-
- :param dict property_schema: schema to clone into a nullable schema
- :returns: a new dict schema
- """
- # TODO(dstanek): deal with the case where type is already a list; we don't
- # do that yet so I'm not wasting time on it
- new_schema = property_schema.copy()
- new_schema['type'] = [property_schema['type'], 'null']
- return new_schema
-
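-# For example:
-#
-#     nullable({'type': 'string'})  # -> {'type': ['string', 'null']}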
-
-def add_array_type(property_schema):
- """Convert the parameter schema to be of type list.
-
- :param dict property_schema: schema to add array type to
- :returns: a new dict schema
- """
- new_schema = property_schema.copy()
- new_schema['type'] = [property_schema['type'], 'array']
- return new_schema
diff --git a/keystone-moon/keystone/common/validation/parameter_types.py b/keystone-moon/keystone/common/validation/parameter_types.py
deleted file mode 100644
index c0753827..00000000
--- a/keystone-moon/keystone/common/validation/parameter_types.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Common parameter types for validating a request reference."""
-
-boolean = {
- 'type': 'boolean',
- 'enum': [True, False]
-}
-
-# NOTE(lbragstad): Be mindful of this pattern as it might require changes
-# once it is used for user names, LDAP-based user names specifically,
-# since commas aren't allowed by the patterns below. Here we only check
-# the length of the name and ensure that it's a string. For now we do
-# not validate against a naming pattern, to avoid issues with
-# internationalization.
-name = {
- 'type': 'string',
- 'minLength': 1,
- 'maxLength': 255
-}
-
-external_id_string = {
- 'type': 'string',
- 'minLength': 1,
- 'maxLength': 64
-}
-
-id_string = {
- 'type': 'string',
- 'minLength': 1,
- 'maxLength': 64,
- # TODO(lbragstad): Find a way to make this configurable such that the end
- # user chooses how much control they want over id_strings with a regex
- 'pattern': '^[a-zA-Z0-9-]+$'
-}
-
-mapping_id_string = {
- 'type': 'string',
- 'minLength': 1,
- 'maxLength': 64,
- 'pattern': '^[a-zA-Z0-9-_]+$'
-}
-
-description = {
- 'type': 'string'
-}
-
-url = {
- 'type': 'string',
- 'minLength': 0,
- 'maxLength': 225,
- # NOTE(edmondsw): we could do more to validate per various RFCs, but the
- # decision was made to err on the side of leniency. The following is
- # based on RFC 1738, section 2.1.
- 'pattern': '^[a-zA-Z0-9+.-]+:.+'
-}
-
-email = {
- 'type': 'string',
- 'format': 'email'
-}
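-# These building blocks are typically composed into per-resource request
-# schemas; a hypothetical sketch:
-#
-#     _project_properties = {
-#         'name': name,
-#         'description': description,
-#         'enabled': boolean,
-#     }
-#     project_create = {
-#         'type': 'object',
-#         'properties': _project_properties,
-#         'required': ['name'],
-#         'additionalProperties': True,
-#     }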
diff --git a/keystone-moon/keystone/common/validation/validators.py b/keystone-moon/keystone/common/validation/validators.py
deleted file mode 100644
index c6d52e9a..00000000
--- a/keystone-moon/keystone/common/validation/validators.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Internal implementation of request body validating middleware."""
-
-import jsonschema
-
-from keystone import exception
-from keystone.i18n import _
-
-
-class SchemaValidator(object):
- """Resource reference validator class."""
-
- validator_org = jsonschema.Draft4Validator
-
- def __init__(self, schema):
- # NOTE(lbragstad): If at some point in the future we want to extend
- # our validators to include something specific we need to check for,
- # we can do it here. Nova's V3 API validators extend the validator to
- # include `self._validate_minimum` and `self._validate_maximum`. This
- # would be handy if we needed to check for something the jsonschema
- # didn't by default. See the Nova V3 validator for details on how this
- # is done.
- validators = {}
- validator_cls = jsonschema.validators.extend(self.validator_org,
- validators)
- fc = jsonschema.FormatChecker()
- self.validator = validator_cls(schema, format_checker=fc)
-
- def validate(self, *args, **kwargs):
- try:
- self.validator.validate(*args, **kwargs)
- except jsonschema.ValidationError as ex:
- # NOTE: For OpenStack-wide message consistency, this error
- # message has been written in a format consistent with WSME.
- if ex.path:
- # NOTE(lbragstad): Here we could think about using iter_errors
- # as a method of providing invalid parameters back to the
- # user.
- # TODO(lbragstad): If the value of a field is confidential or
- # too long, then we should build the masking in here so that
- # we don't expose sensitive user information in the event it
- # fails validation.
- detail = _("Invalid input for field '%(path)s'. The value is "
- "'%(value)s'.") % {'path': ex.path.pop(),
- 'value': ex.instance}
- else:
- detail = ex.message
- raise exception.SchemaValidationError(detail=detail)
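-# Direct use is straightforward; the schema below is illustrative:
-#
-#     v = SchemaValidator({'type': 'object',
-#                          'properties': {'name': {'type': 'string'}},
-#                          'required': ['name']})
-#     v.validate({'name': 'demo'})  # passes
-#     v.validate({})                # raises SchemaValidationError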
diff --git a/keystone-moon/keystone/common/wsgi.py b/keystone-moon/keystone/common/wsgi.py
deleted file mode 100644
index 04528a0c..00000000
--- a/keystone-moon/keystone/common/wsgi.py
+++ /dev/null
@@ -1,834 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# Copyright 2010 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Utility methods for working with WSGI servers."""
-
-import copy
-import itertools
-import logging
-import re
-import wsgiref.util
-
-from oslo_config import cfg
-import oslo_i18n
-from oslo_log import log
-from oslo_serialization import jsonutils
-from oslo_utils import importutils
-from oslo_utils import strutils
-import routes.middleware
-import six
-import webob.dec
-import webob.exc
-
-from keystone.common import dependency
-from keystone.common import json_home
-from keystone.common import utils
-from keystone import exception
-from keystone.i18n import _
-from keystone.i18n import _LI
-from keystone.i18n import _LW
-from keystone.models import token_model
-
-
-CONF = cfg.CONF
-LOG = log.getLogger(__name__)
-
-# Environment variable used to pass the request context
-CONTEXT_ENV = 'openstack.context'
-
-# Environment variable used to pass the request params
-PARAMS_ENV = 'openstack.params'
-
-JSON_ENCODE_CONTENT_TYPES = set(['application/json',
- 'application/json-home'])
-
-
-def validate_token_bind(context, token_ref):
- bind_mode = CONF.token.enforce_token_bind
-
- if bind_mode == 'disabled':
- return
-
- if not isinstance(token_ref, token_model.KeystoneToken):
- raise exception.UnexpectedError(_('token reference must be a '
- 'KeystoneToken type, got: %s') %
- type(token_ref))
- bind = token_ref.bind
-
- # permissive and strict modes don't require there to be a bind
- permissive = bind_mode in ('permissive', 'strict')
-
- if not bind:
- if permissive:
- # no bind provided and none required
- return
- else:
- LOG.info(_LI("No bind information present in token"))
- raise exception.Unauthorized()
-
- # get the named mode if bind_mode is not one of the known
- name = None if permissive or bind_mode == 'required' else bind_mode
-
- if name and name not in bind:
- LOG.info(_LI("Named bind mode %s not in bind information"), name)
- raise exception.Unauthorized()
-
- for bind_type, identifier in bind.items():
- if bind_type == 'kerberos':
- if not (context['environment'].get('AUTH_TYPE', '').lower()
- == 'negotiate'):
- LOG.info(_LI("Kerberos credentials required and not present"))
- raise exception.Unauthorized()
-
- if not context['environment'].get('REMOTE_USER') == identifier:
- LOG.info(_LI("Kerberos credentials do not match "
- "those in bind"))
- raise exception.Unauthorized()
-
- LOG.info(_LI("Kerberos bind authentication successful"))
-
- elif bind_mode == 'permissive':
- LOG.debug(("Ignoring unknown bind for permissive mode: "
- "{%(bind_type)s: %(identifier)s}"),
- {'bind_type': bind_type, 'identifier': identifier})
- else:
- LOG.info(_LI("Couldn't verify unknown bind: "
- "{%(bind_type)s: %(identifier)s}"),
- {'bind_type': bind_type, 'identifier': identifier})
- raise exception.Unauthorized()
-
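-# Bind enforcement is driven by configuration; an illustrative
-# keystone.conf fragment:
-#
-#     [token]
-#     enforce_token_bind = kerberos
-#
-# 'disabled' skips the check entirely; 'permissive' and 'strict' verify
-# a bind only when one is present (strict rejects unknown bind types);
-# 'required' demands a bind; a named mode such as 'kerberos' demands
-# that specific bind type.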
-
-def best_match_language(req):
- """Determines the best available locale.
-
- This returns best available locale based on the Accept-Language HTTP
- header passed in the request.
- """
- if not req.accept_language:
- return None
- return req.accept_language.best_match(
- oslo_i18n.get_available_languages('keystone'))
-
-
-class BaseApplication(object):
- """Base WSGI application wrapper. Subclasses need to implement __call__."""
-
- @classmethod
- def factory(cls, global_config, **local_config):
- """Used for paste app factories in paste.deploy config files.
-
- Any local configuration (that is, values under the [app:APPNAME]
- section of the paste config) will be passed into the `__init__` method
- as kwargs.
-
- A hypothetical configuration would look like:
-
- [app:wadl]
- latest_version = 1.3
- paste.app_factory = keystone.fancy_api:Wadl.factory
-
- which would result in a call to the `Wadl` class as
-
- import keystone.fancy_api
- keystone.fancy_api.Wadl(latest_version='1.3')
-
- You could of course re-implement the `factory` method in subclasses,
- but with the kwarg passing it shouldn't be necessary.
-
- """
- return cls(**local_config)
-
- def __call__(self, environ, start_response):
- r"""Subclasses will probably want to implement __call__ like this:
-
- @webob.dec.wsgify()
- def __call__(self, req):
- # Any of the following objects work as responses:
-
- # Option 1: simple string
- res = 'message\n'
-
- # Option 2: a nicely formatted HTTP exception page
- res = exc.HTTPForbidden(explanation='Nice try')
-
- # Option 3: a webob Response object (in case you need to play with
- # headers, want to be treated like an iterable, and so on)
- res = Response()
- res.app_iter = open('somefile')
-
- # Option 4: any wsgi app to be run next
- res = self.application
-
- # Option 5: you can get a Response object for a wsgi app, too, to
- # play with headers etc
- res = req.get_response(self.application)
-
- # You can then just return your response...
- return res
- # ... or set req.response and return None.
- req.response = res
-
- See the end of http://pythonpaste.org/webob/modules/dec.html
- for more info.
-
- """
- raise NotImplementedError('You must implement __call__')
-
-
-@dependency.requires('assignment_api', 'policy_api', 'token_provider_api')
-class Application(BaseApplication):
- @webob.dec.wsgify()
- def __call__(self, req):
- arg_dict = req.environ['wsgiorg.routing_args'][1]
- action = arg_dict.pop('action')
- del arg_dict['controller']
-
- # allow middleware up the stack to provide context, params and headers.
- context = req.environ.get(CONTEXT_ENV, {})
-
- try:
- context['query_string'] = dict(req.params.items())
- except UnicodeDecodeError:
- # The webob package raises UnicodeDecodeError when a request cannot
- # be decoded. Raise ValidationError instead to avoid an UnknownError.
- msg = _('Query string is not UTF-8 encoded')
- raise exception.ValidationError(msg)
-
- context['headers'] = dict(req.headers.items())
- context['path'] = req.environ['PATH_INFO']
- scheme = req.environ.get(CONF.secure_proxy_ssl_header)
- if scheme:
- # NOTE(andrey-mp): "wsgi.url_scheme" contains the protocol used
- # before the proxy removed it ('https' usually). So if
- # the webob.Request instance is modified in order to use this
- # scheme instead of the one defined by API, the call to
- # webob.Request.relative_url() will return a URL with the correct
- # scheme.
- req.environ['wsgi.url_scheme'] = scheme
- context['host_url'] = req.host_url
- params = req.environ.get(PARAMS_ENV, {})
- # authentication and authorization attributes are set as environment
- # values by the container and processed by the pipeline. The complete
- # set is not yet known.
- context['environment'] = req.environ
- context['accept_header'] = req.accept
- req.environ = None
-
- params.update(arg_dict)
-
- context.setdefault('is_admin', False)
-
- # TODO(termie): do some basic normalization on methods
- method = getattr(self, action)
-
- # NOTE(morganfainberg): use the request method to normalize the
- # response code between GET and HEAD requests. The HTTP status should
- # be the same.
- LOG.info('%(req_method)s %(uri)s', {
- 'req_method': req.environ['REQUEST_METHOD'].upper(),
- 'uri': wsgiref.util.request_uri(req.environ),
- })
-
- params = self._normalize_dict(params)
-
- try:
- result = method(context, **params)
- except exception.Unauthorized as e:
- LOG.warning(
- _LW("Authorization failed. %(exception)s from "
- "%(remote_addr)s"),
- {'exception': e, 'remote_addr': req.environ['REMOTE_ADDR']})
- return render_exception(e, context=context,
- user_locale=best_match_language(req))
- except exception.Error as e:
- LOG.warning(six.text_type(e))
- return render_exception(e, context=context,
- user_locale=best_match_language(req))
- except TypeError as e:
- LOG.exception(six.text_type(e))
- return render_exception(exception.ValidationError(e),
- context=context,
- user_locale=best_match_language(req))
- except Exception as e:
- LOG.exception(six.text_type(e))
- return render_exception(exception.UnexpectedError(exception=e),
- context=context,
- user_locale=best_match_language(req))
-
- if result is None:
- return render_response(status=(204, 'No Content'))
- elif isinstance(result, six.string_types):
- return result
- elif isinstance(result, webob.Response):
- return result
- elif isinstance(result, webob.exc.WSGIHTTPException):
- return result
-
- response_code = self._get_response_code(req)
- return render_response(body=result, status=response_code,
- method=req.environ['REQUEST_METHOD'])
-
- def _get_response_code(self, req):
- req_method = req.environ['REQUEST_METHOD']
- controller = importutils.import_class('keystone.common.controller')
- code = None
- if isinstance(self, controller.V3Controller) and req_method == 'POST':
- code = (201, 'Created')
- return code
-
- def _normalize_arg(self, arg):
- return arg.replace(':', '_').replace('-', '_')
-
- def _normalize_dict(self, d):
- return {self._normalize_arg(k): v for (k, v) in d.items()}
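- # As an illustration, _normalize_dict({'domain-id': 'x'}) yields
- # {'domain_id': 'x'}; both ':' and '-' in keys are mapped to '_'.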
-
- def assert_admin(self, context):
- """Ensure the user is an admin.
-
- :raises keystone.exception.Unauthorized: if a token could not be
- found/authorized, a user is invalid, or a tenant is
- invalid/not scoped.
- :raises keystone.exception.Forbidden: if the user is not an admin and
- does not have the admin role
-
- """
- if not context['is_admin']:
- user_token_ref = utils.get_token_ref(context)
-
- validate_token_bind(context, user_token_ref)
- creds = copy.deepcopy(user_token_ref.metadata)
-
- try:
- creds['user_id'] = user_token_ref.user_id
- except exception.UnexpectedError:
- LOG.debug('Invalid user')
- raise exception.Unauthorized()
-
- if user_token_ref.project_scoped:
- creds['tenant_id'] = user_token_ref.project_id
- else:
- LOG.debug('Invalid tenant')
- raise exception.Unauthorized()
-
- creds['roles'] = user_token_ref.role_names
- # Accept either is_admin or the admin role
- self.policy_api.enforce(creds, 'admin_required', {})
-
- def _attribute_is_empty(self, ref, attribute):
- """Determine if the attribute in ref is empty or None."""
- return ref.get(attribute) is None or ref.get(attribute) == ''
-
- def _require_attribute(self, ref, attribute):
- """Ensures the reference contains the specified attribute.
-
- Raise a ValidationError if the given attribute is not present
- """
- if self._attribute_is_empty(ref, attribute):
- msg = _('%s field is required and cannot be empty') % attribute
- raise exception.ValidationError(message=msg)
-
- def _require_attributes(self, ref, attrs):
- """Ensures the reference contains the specified attributes.
-
- Raise a ValidationError if any of the given attributes is not present
- """
- missing_attrs = [attribute for attribute in attrs
- if self._attribute_is_empty(ref, attribute)]
-
- if missing_attrs:
- msg = _('%s field(s) cannot be empty') % ', '.join(missing_attrs)
- raise exception.ValidationError(message=msg)
-
- def _get_trust_id_for_request(self, context):
- """Get the trust_id for a call.
-
- Retrieve the trust_id from the token.
- Return None if the token is not trust-scoped.
- """
- if ('token_id' not in context or
- context.get('token_id') == CONF.admin_token):
- LOG.debug(('will not look up trust as the request auth token is '
- 'either absent or it is the system admin token'))
- return None
- token_ref = utils.get_token_ref(context)
- return token_ref.trust_id
-
- @classmethod
- def base_url(cls, context, endpoint_type):
- url = CONF['%s_endpoint' % endpoint_type]
-
- if url:
- substitutions = dict(
- itertools.chain(CONF.items(), CONF.eventlet_server.items()))
-
- url = url % substitutions
- elif 'environment' in context:
- url = wsgiref.util.application_uri(context['environment'])
- # remove version from the URL as it may be part of SCRIPT_NAME but
- # it should not be part of base URL
- url = re.sub(r'/v(3|(2\.0))/*$', '', url)
-
- # now remove the standard port
- url = utils.remove_standard_port(url)
- else:
- # if we don't have enough information to come up with a base URL,
- # then fall back to localhost. This should never happen in a
- # production environment.
- url = 'http://localhost:%d' % CONF.eventlet_server.public_port
-
- return url.rstrip('/')
-
-
-class Middleware(Application):
- """Base WSGI middleware.
-
- These classes require an application to be
- initialized that will be called next. By default the middleware will
- simply call its wrapped app, or you can override __call__ to customize its
- behavior.
-
- """
-
- @classmethod
- def factory(cls, global_config):
- """Used for paste app factories in paste.deploy config files."""
- def _factory(app):
- return cls(app)
- return _factory
-
- def __init__(self, application):
- super(Middleware, self).__init__()
- self.application = application
-
- def process_request(self, request):
- """Called on each request.
-
- If this returns None, the next application down the stack will be
- executed. If it returns a response then that response will be returned
- and execution will stop here.
-
- """
- return None
-
- def process_response(self, request, response):
- """Do whatever you'd like to the response, based on the request."""
- return response
-
- @webob.dec.wsgify()
- def __call__(self, request):
- try:
- response = self.process_request(request)
- if response:
- return response
- response = request.get_response(self.application)
- return self.process_response(request, response)
- except exception.Error as e:
- LOG.warning(six.text_type(e))
- return render_exception(e, request=request,
- user_locale=best_match_language(request))
- except TypeError as e:
- LOG.exception(six.text_type(e))
- return render_exception(exception.ValidationError(e),
- request=request,
- user_locale=best_match_language(request))
- except Exception as e:
- LOG.exception(six.text_type(e))
- return render_exception(exception.UnexpectedError(exception=e),
- request=request,
- user_locale=best_match_language(request))
-
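-# A minimal subclass only needs the hooks it cares about; this sketch
-# (class name hypothetical) logs each request and passes it through:
-#
-#     class RequestLogger(Middleware):
-#         def process_request(self, request):
-#             LOG.info('%s %s', request.method, request.path_info)
-#             return None  # continue down the pipeline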
-
-class Debug(Middleware):
- """Helper class for debugging a WSGI application.
-
- Can be inserted into any WSGI application chain to get information
- about the request and response.
-
- """
-
- @webob.dec.wsgify()
- def __call__(self, req):
- if not hasattr(LOG, 'isEnabledFor') or LOG.isEnabledFor(logging.DEBUG):
- LOG.debug('%s %s %s', ('*' * 20), 'REQUEST ENVIRON', ('*' * 20))
- for key, value in req.environ.items():
- LOG.debug('%s = %s', key,
- strutils.mask_password(value))
- LOG.debug('')
- LOG.debug('%s %s %s', ('*' * 20), 'REQUEST BODY', ('*' * 20))
- for line in req.body_file:
- LOG.debug('%s', strutils.mask_password(line))
- LOG.debug('')
-
- resp = req.get_response(self.application)
- if not hasattr(LOG, 'isEnabledFor') or LOG.isEnabledFor(logging.DEBUG):
- LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE HEADERS', ('*' * 20))
- for (key, value) in resp.headers.items():
- LOG.debug('%s = %s', key, value)
- LOG.debug('')
-
- resp.app_iter = self.print_generator(resp.app_iter)
-
- return resp
-
- @staticmethod
- def print_generator(app_iter):
- """Iterator that prints the contents of a wrapper string."""
- LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE BODY', ('*' * 20))
- for part in app_iter:
- LOG.debug(part)
- yield part
-
-
-class Router(object):
- """WSGI middleware that maps incoming requests to WSGI apps."""
-
- def __init__(self, mapper):
- """Create a router for the given routes.Mapper.
-
- Each route in `mapper` must specify a 'controller', which is a
- WSGI app to call. You'll probably want to specify an 'action' as
- well and have your controller be an object that can route
- the request to the action-specific method.
-
- Examples:
- mapper = routes.Mapper()
- sc = ServerController()
-
- # Explicit mapping of one route to a controller+action
- mapper.connect(None, '/svrlist', controller=sc, action='list')
-
- # Actions are all implicitly defined
- mapper.resource('server', 'servers', controller=sc)
-
- # Pointing to an arbitrary WSGI app. You can specify the
- # {path_info:.*} parameter so the target app can be handed just that
- # section of the URL.
- mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
-
- """
- self.map = mapper
- self._router = routes.middleware.RoutesMiddleware(self._dispatch,
- self.map)
-
- @webob.dec.wsgify()
- def __call__(self, req):
- """Route the incoming request to a controller based on self.map.
-
- If no match, return a 404.
-
- """
- return self._router
-
- @staticmethod
- @webob.dec.wsgify()
- def _dispatch(req):
- """Dispatch the request to the appropriate controller.
-
- Called by self._router after matching the incoming request to a route
- and putting the information into req.environ. Either returns 404
- or the routed WSGI app's response.
-
- """
- match = req.environ['wsgiorg.routing_args'][1]
- if not match:
- msg = _('The resource could not be found.')
- return render_exception(exception.NotFound(msg),
- request=req,
- user_locale=best_match_language(req))
- app = match['controller']
- return app
-
-
-class ComposingRouter(Router):
- def __init__(self, mapper=None, routers=None):
- if mapper is None:
- mapper = routes.Mapper()
- if routers is None:
- routers = []
- for router in routers:
- router.add_routes(mapper)
- super(ComposingRouter, self).__init__(mapper)
-
-
-class ComposableRouter(Router):
- """Router that supports use by ComposingRouter."""
-
- def __init__(self, mapper=None):
- if mapper is None:
- mapper = routes.Mapper()
- self.add_routes(mapper)
- super(ComposableRouter, self).__init__(mapper)
-
- def add_routes(self, mapper):
- """Add routes to given mapper."""
- pass
-
-
-class ExtensionRouter(Router):
- """A router that allows extensions to supplement or overwrite routes.
-
- Expects to be subclassed.
- """
-
- def __init__(self, application, mapper=None):
- if mapper is None:
- mapper = routes.Mapper()
- self.application = application
- self.add_routes(mapper)
- mapper.connect('/{path_info:.*}', controller=self.application)
- super(ExtensionRouter, self).__init__(mapper)
-
- def add_routes(self, mapper):
- pass
-
- @classmethod
- def factory(cls, global_config, **local_config):
- """Used for paste app factories in paste.deploy config files.
-
- Any local configuration (that is, values under the [filter:APPNAME]
- section of the paste config) will be passed into the `__init__` method
- as kwargs.
-
- A hypothetical configuration would look like:
-
- [filter:analytics]
- redis_host = 127.0.0.1
- paste.filter_factory = keystone.analytics:Analytics.factory
-
- which would result in a call to the `Analytics` class as
-
- import keystone.analytics
- keystone.analytics.Analytics(app, redis_host='127.0.0.1')
-
- You could of course re-implement the `factory` method in subclasses,
- but with the kwarg passing it shouldn't be necessary.
-
- """
- def _factory(app):
- return cls(app, **local_config)
- return _factory
-
-
-class RoutersBase(object):
- """Base class for Routers."""
-
- def __init__(self):
- self.v3_resources = []
-
- def append_v3_routers(self, mapper, routers):
- """Append v3 routers.
-
- Subclasses should override this method to map their routes.
-
- Use self._add_resource() to map routes for a resource.
- """
-
- def _add_resource(self, mapper, controller, path, rel,
- get_action=None, head_action=None, get_head_action=None,
- put_action=None, post_action=None, patch_action=None,
- delete_action=None, get_post_action=None,
- path_vars=None, status=json_home.Status.STABLE,
- new_path=None):
- if get_head_action:
- getattr(controller, get_head_action) # ensure the attribute exists
- mapper.connect(path, controller=controller, action=get_head_action,
- conditions=dict(method=['GET', 'HEAD']))
- if get_action:
- getattr(controller, get_action) # ensure the attribute exists
- mapper.connect(path, controller=controller, action=get_action,
- conditions=dict(method=['GET']))
- if head_action:
- getattr(controller, head_action) # ensure the attribute exists
- mapper.connect(path, controller=controller, action=head_action,
- conditions=dict(method=['HEAD']))
- if put_action:
- getattr(controller, put_action) # ensure the attribute exists
- mapper.connect(path, controller=controller, action=put_action,
- conditions=dict(method=['PUT']))
- if post_action:
- getattr(controller, post_action) # ensure the attribute exists
- mapper.connect(path, controller=controller, action=post_action,
- conditions=dict(method=['POST']))
- if patch_action:
- getattr(controller, patch_action) # ensure the attribute exists
- mapper.connect(path, controller=controller, action=patch_action,
- conditions=dict(method=['PATCH']))
- if delete_action:
- getattr(controller, delete_action) # ensure the attribute exists
- mapper.connect(path, controller=controller, action=delete_action,
- conditions=dict(method=['DELETE']))
- if get_post_action:
- getattr(controller, get_post_action) # ensure the attribute exists
- mapper.connect(path, controller=controller, action=get_post_action,
- conditions=dict(method=['GET', 'POST']))
-
- resource_data = dict()
-
- if path_vars:
- resource_data['href-template'] = new_path or path
- resource_data['href-vars'] = path_vars
- else:
- resource_data['href'] = new_path or path
-
- json_home.Status.update_resource_data(resource_data, status)
-
- self.v3_resources.append((rel, resource_data))
-
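-# A subclass typically wires a resource like this sketch (the controller,
-# path and relation names are hypothetical):
-#
-#     def append_v3_routers(self, mapper, routers):
-#         self._add_resource(
-#             mapper, widget_controller,
-#             path='/widgets/{widget_id}',
-#             get_head_action='get_widget',
-#             rel=json_home.build_v3_resource_relation('widget'),
-#             path_vars={
-#                 'widget_id':
-#                     json_home.build_v3_parameter_relation('widget_id'),
-#             })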
-
-class V3ExtensionRouter(ExtensionRouter, RoutersBase):
- """Base class for V3 extension router."""
-
- def __init__(self, application, mapper=None):
- self.v3_resources = list()
- super(V3ExtensionRouter, self).__init__(application, mapper)
-
- def _update_version_response(self, response_data):
- response_data['resources'].update(self.v3_resources)
-
- @webob.dec.wsgify()
- def __call__(self, request):
- if request.path_info != '/':
- # Not a request for version info so forward to super.
- return super(V3ExtensionRouter, self).__call__(request)
-
- response = request.get_response(self.application)
-
- if response.status_code != 200:
- # The request failed, so don't update the response.
- return response
-
- if response.headers['Content-Type'] != 'application/json-home':
- # Not a request for JSON Home document, so don't update the
- # response.
- return response
-
- response_data = jsonutils.loads(response.body)
- self._update_version_response(response_data)
- response.body = jsonutils.dump_as_bytes(response_data,
- cls=utils.SmarterEncoder)
- return response
-
-
-def render_response(body=None, status=None, headers=None, method=None):
- """Forms a WSGI response."""
- if headers is None:
- headers = []
- else:
- headers = list(headers)
- headers.append(('Vary', 'X-Auth-Token'))
-
- if body is None:
- body = b''
- status = status or (204, 'No Content')
- else:
- content_types = [v for h, v in headers if h == 'Content-Type']
- if content_types:
- content_type = content_types[0]
- else:
- content_type = None
-
- if content_type is None or content_type in JSON_ENCODE_CONTENT_TYPES:
- body = jsonutils.dump_as_bytes(body, cls=utils.SmarterEncoder)
- if content_type is None:
- headers.append(('Content-Type', 'application/json'))
- status = status or (200, 'OK')
-
- # NOTE(davechen): `mod_wsgi` follows the standards from pep-3333 and
- # requires response header values to be the native str type: a byte
- # string on python2 and a unicode string on python3. Otherwise keystone
- # will not work under apache with `mod_wsgi`.
- # keystone therefore checks the data type of each header and converts
- # it if needed.
- # see bug:
- # https://bugs.launchpad.net/keystone/+bug/1528981
- # see pep-3333:
- # https://www.python.org/dev/peps/pep-3333/#a-note-on-string-types
- # see source from mod_wsgi:
- # https://github.com/GrahamDumpleton/mod_wsgi(methods:
- # wsgi_convert_headers_to_bytes(...), wsgi_convert_string_to_bytes(...)
- # and wsgi_validate_header_value(...)).
- def _convert_to_str(headers):
- str_headers = []
- for header in headers:
- str_header = []
- for value in header:
- if not isinstance(value, str):
- str_header.append(str(value))
- else:
- str_header.append(value)
- # convert the list to the immutable tuple to build the headers.
- # header's key/value will be guaranteed to be str type.
- str_headers.append(tuple(str_header))
- return str_headers
-
- headers = _convert_to_str(headers)
-
- resp = webob.Response(body=body,
- status='%s %s' % status,
- headerlist=headers)
-
- if method and method.upper() == 'HEAD':
- # NOTE(morganfainberg): HEAD requests should return the same status
- # as a GET request and same headers (including content-type and
- # content-length). The webob.Response object automatically changes
- # content-length (and other headers) if the body is set to b''. Capture
- # all headers and reset them on the response object after clearing the
- # body. The body can only be set to a binary-type (not TextType or
- # NoneType), so b'' is used here and should be compatible with
- # both py2x and py3x.
- stored_headers = resp.headers.copy()
- resp.body = b''
- for header, value in stored_headers.items():
- resp.headers[header] = value
-
- return resp
-
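-# Typical calls (bodies and statuses here are illustrative):
-#
-#     render_response(body={'hello': 'world'})     # 200 OK, JSON body
-#     render_response(status=(204, 'No Content'))  # empty body
-#     render_response(body=ref, method='HEAD')     # headers only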
-
-def render_exception(error, context=None, request=None, user_locale=None):
- """Forms a WSGI response based on the current error."""
- error_message = error.args[0]
- message = oslo_i18n.translate(error_message, desired_locale=user_locale)
- if message is error_message:
- # translate() didn't do anything because it wasn't a Message,
- # convert to a string.
- message = six.text_type(message)
-
- body = {'error': {
- 'code': error.code,
- 'title': error.title,
- 'message': message,
- }}
- headers = []
- if isinstance(error, exception.AuthPluginException):
- body['error']['identity'] = error.authentication
- elif isinstance(error, exception.Unauthorized):
- # NOTE(gyee): we only care about the request environment in the
- # context. Also, it's OK to pass the environment as it is read-only in
- # Application.base_url().
- local_context = {}
- if request:
- local_context = {'environment': request.environ}
- elif context and 'environment' in context:
- local_context = {'environment': context['environment']}
- url = Application.base_url(local_context, 'public')
-
- headers.append(('WWW-Authenticate', 'Keystone uri="%s"' % url))
- return render_response(status=(error.code, error.title),
- body=body,
- headers=headers)