summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--keystone-moon/.coveragerc7
-rw-r--r--keystone-moon/.mailmap28
-rw-r--r--keystone-moon/.testr.conf19
-rw-r--r--keystone-moon/CONTRIBUTING.rst15
-rw-r--r--keystone-moon/MANIFEST.in4
-rw-r--r--keystone-moon/README.rst6
-rw-r--r--keystone-moon/bandit.yaml134
-rw-r--r--keystone-moon/config-generator/keystone.conf2
-rw-r--r--keystone-moon/doc/source/apache-httpd.rst50
-rw-r--r--keystone-moon/doc/source/api_curl_examples.rst117
-rw-r--r--keystone-moon/doc/source/community.rst4
-rw-r--r--keystone-moon/doc/source/conf.py10
-rw-r--r--keystone-moon/doc/source/configuration.rst229
-rw-r--r--keystone-moon/doc/source/configure_federation.rst124
-rw-r--r--keystone-moon/doc/source/developing.rst172
-rw-r--r--keystone-moon/doc/source/extension_development.rst4
-rw-r--r--keystone-moon/doc/source/extensions.rst20
-rw-r--r--keystone-moon/doc/source/extensions/endpoint_filter.rst2
-rw-r--r--keystone-moon/doc/source/extensions/endpoint_policy.rst2
-rw-r--r--keystone-moon/doc/source/extensions/oauth1.rst3
-rw-r--r--keystone-moon/doc/source/extensions/revoke.rst2
-rw-r--r--keystone-moon/doc/source/external-auth.rst17
-rw-r--r--keystone-moon/doc/source/federation/mellon.rst122
-rw-r--r--keystone-moon/doc/source/federation/openidc.rst94
-rw-r--r--keystone-moon/doc/source/federation/shibboleth.rst279
-rw-r--r--keystone-moon/doc/source/federation/websso.rst239
-rw-r--r--keystone-moon/doc/source/http-api.rst8
-rw-r--r--keystone-moon/doc/source/index.rst3
-rw-r--r--keystone-moon/doc/source/installing.rst19
-rw-r--r--keystone-moon/doc/source/man/keystone-all.rst2
-rw-r--r--keystone-moon/doc/source/man/keystone-manage.rst10
-rw-r--r--keystone-moon/doc/source/mapping_combinations.rst597
-rw-r--r--keystone-moon/doc/source/mapping_schema.rst160
-rw-r--r--keystone-moon/doc/source/policy_mapping.rst213
-rw-r--r--keystone-moon/doc/source/setup.rst39
-rw-r--r--keystone-moon/etc/keystone-paste.ini54
-rw-r--r--keystone-moon/etc/keystone.conf.sample551
-rw-r--r--keystone-moon/etc/policy.json11
-rw-r--r--keystone-moon/etc/policy.v3cloudsample.json13
-rw-r--r--keystone-moon/httpd/README2
-rw-r--r--keystone-moon/httpd/wsgi-keystone.conf28
-rw-r--r--keystone-moon/keystone/assignment/backends/ldap.py66
-rw-r--r--keystone-moon/keystone/assignment/backends/sql.py108
-rw-r--r--keystone-moon/keystone/assignment/controllers.py385
-rw-r--r--keystone-moon/keystone/assignment/core.py660
-rw-r--r--keystone-moon/keystone/auth/controllers.py63
-rw-r--r--keystone-moon/keystone/auth/plugins/core.py5
-rw-r--r--keystone-moon/keystone/auth/plugins/external.py85
-rw-r--r--keystone-moon/keystone/auth/plugins/mapped.py62
-rw-r--r--keystone-moon/keystone/auth/plugins/oauth1.py6
-rw-r--r--keystone-moon/keystone/auth/plugins/password.py5
-rw-r--r--keystone-moon/keystone/auth/plugins/saml2.py3
-rw-r--r--keystone-moon/keystone/auth/plugins/token.py4
-rw-r--r--keystone-moon/keystone/catalog/backends/sql.py66
-rw-r--r--keystone-moon/keystone/catalog/backends/templated.py39
-rw-r--r--keystone-moon/keystone/catalog/controllers.py18
-rw-r--r--keystone-moon/keystone/catalog/core.py73
-rw-r--r--keystone-moon/keystone/catalog/schema.py4
-rw-r--r--keystone-moon/keystone/cmd/__init__.py0
-rw-r--r--keystone-moon/keystone/cmd/all.py39
-rw-r--r--keystone-moon/keystone/cmd/cli.py685
-rw-r--r--keystone-moon/keystone/cmd/manage.py47
-rw-r--r--keystone-moon/keystone/common/authorization.py1
-rw-r--r--keystone-moon/keystone/common/base64utils.py9
-rw-r--r--keystone-moon/keystone/common/cache/_memcache_pool.py23
-rw-r--r--keystone-moon/keystone/common/cache/backends/mongo.py16
-rw-r--r--keystone-moon/keystone/common/clean.py87
-rw-r--r--keystone-moon/keystone/common/config.py284
-rw-r--r--keystone-moon/keystone/common/controller.py98
-rw-r--r--keystone-moon/keystone/common/dependency.py89
-rw-r--r--keystone-moon/keystone/common/driver_hints.py5
-rw-r--r--keystone-moon/keystone/common/environment/__init__.py3
-rw-r--r--keystone-moon/keystone/common/environment/eventlet_server.py22
-rw-r--r--keystone-moon/keystone/common/json_home.py19
-rw-r--r--keystone-moon/keystone/common/kvs/backends/memcached.py25
-rw-r--r--keystone-moon/keystone/common/kvs/core.py39
-rw-r--r--keystone-moon/keystone/common/kvs/legacy.py3
-rw-r--r--keystone-moon/keystone/common/ldap/core.py171
-rw-r--r--keystone-moon/keystone/common/manager.py32
-rw-r--r--keystone-moon/keystone/common/models.py3
-rw-r--r--keystone-moon/keystone/common/openssl.py6
-rw-r--r--keystone-moon/keystone/common/sql/core.py117
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py4
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py4
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py4
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/048_placeholder.py4
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py4
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py14
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py8
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/052_add_auth_url_to_region.py9
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py66
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py10
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py10
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/056_placeholder.py4
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/057_placeholder.py4
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/058_placeholder.py4
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/059_placeholder.py4
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py4
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py15
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py6
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/063_drop_region_auth_url.py10
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py6
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py11
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py13
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/067_drop_redundant_mysql_index.py25
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/068_placeholder.py18
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/069_placeholder.py18
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/070_placeholder.py18
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/071_placeholder.py18
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/072_placeholder.py18
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py114
-rw-r--r--keystone-moon/keystone/common/sql/migrate_repo/versions/074_add_is_domain_project.py27
-rw-r--r--keystone-moon/keystone/common/sql/migration_helpers.py66
-rw-r--r--keystone-moon/keystone/common/utils.py79
-rw-r--r--keystone-moon/keystone/common/validation/__init__.py37
-rw-r--r--keystone-moon/keystone/common/validation/parameter_types.py6
-rw-r--r--keystone-moon/keystone/common/wsgi.py92
-rw-r--r--keystone-moon/keystone/config.py3
-rw-r--r--keystone-moon/keystone/contrib/ec2/controllers.py72
-rw-r--r--keystone-moon/keystone/contrib/endpoint_filter/backends/catalog_sql.py6
-rw-r--r--keystone-moon/keystone/contrib/endpoint_filter/backends/sql.py5
-rw-r--r--keystone-moon/keystone/contrib/endpoint_filter/controllers.py2
-rw-r--r--keystone-moon/keystone/contrib/endpoint_filter/core.py4
-rw-r--r--keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py9
-rw-r--r--keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/002_add_endpoint_groups.py10
-rw-r--r--keystone-moon/keystone/contrib/endpoint_filter/routers.py51
-rw-r--r--keystone-moon/keystone/contrib/endpoint_policy/__init__.py15
-rw-r--r--keystone-moon/keystone/contrib/endpoint_policy/backends/sql.py134
-rw-r--r--keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/versions/001_add_endpoint_policy_table.py8
-rw-r--r--keystone-moon/keystone/contrib/endpoint_policy/routers.py79
-rw-r--r--keystone-moon/keystone/contrib/example/core.py11
-rw-r--r--keystone-moon/keystone/contrib/example/migrate_repo/versions/001_example_table.py11
-rw-r--r--keystone-moon/keystone/contrib/federation/backends/sql.py71
-rw-r--r--keystone-moon/keystone/contrib/federation/constants.py15
-rw-r--r--keystone-moon/keystone/contrib/federation/controllers.py142
-rw-r--r--keystone-moon/keystone/contrib/federation/core.py18
-rw-r--r--keystone-moon/keystone/contrib/federation/idp.py128
-rw-r--r--keystone-moon/keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py9
-rw-r--r--keystone-moon/keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py10
-rw-r--r--keystone-moon/keystone/contrib/federation/migrate_repo/versions/003_mapping_id_nullable_false.py6
-rw-r--r--keystone-moon/keystone/contrib/federation/migrate_repo/versions/004_add_remote_id_column.py7
-rw-r--r--keystone-moon/keystone/contrib/federation/migrate_repo/versions/005_add_service_provider_table.py7
-rw-r--r--keystone-moon/keystone/contrib/federation/migrate_repo/versions/006_fixup_service_provider_attributes.py8
-rw-r--r--keystone-moon/keystone/contrib/federation/migrate_repo/versions/007_add_remote_id_table.py41
-rw-r--r--keystone-moon/keystone/contrib/federation/migrate_repo/versions/008_add_relay_state_to_sp.py39
-rw-r--r--keystone-moon/keystone/contrib/federation/routers.py50
-rw-r--r--keystone-moon/keystone/contrib/federation/schema.py3
-rw-r--r--keystone-moon/keystone/contrib/federation/utils.py66
-rw-r--r--keystone-moon/keystone/contrib/moon/algorithms.py34
-rw-r--r--keystone-moon/keystone/contrib/moon/backends/memory.py17
-rw-r--r--keystone-moon/keystone/contrib/moon/backends/sql.py23
-rw-r--r--keystone-moon/keystone/contrib/moon/core.py609
-rw-r--r--keystone-moon/keystone/contrib/moon/extension.py740
-rw-r--r--keystone-moon/keystone/contrib/moon/migrate_repo/versions/001_moon.py12
-rw-r--r--keystone-moon/keystone/contrib/moon/routers.py2
-rw-r--r--keystone-moon/keystone/contrib/oauth1/backends/sql.py10
-rw-r--r--keystone-moon/keystone/contrib/oauth1/controllers.py12
-rw-r--r--keystone-moon/keystone/contrib/oauth1/core.py5
-rw-r--r--keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py10
-rw-r--r--keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py17
-rw-r--r--keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py7
-rw-r--r--keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py10
-rw-r--r--keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/005_consumer_id_index.py11
-rw-r--r--keystone-moon/keystone/contrib/oauth1/routers.py12
-rw-r--r--keystone-moon/keystone/contrib/revoke/backends/kvs.py33
-rw-r--r--keystone-moon/keystone/contrib/revoke/backends/sql.py4
-rw-r--r--keystone-moon/keystone/contrib/revoke/core.py25
-rw-r--r--keystone-moon/keystone/contrib/revoke/migrate_repo/versions/001_revoke_table.py11
-rw-r--r--keystone-moon/keystone/contrib/revoke/migrate_repo/versions/002_add_audit_id_and_chain_to_revoke_table.py9
-rw-r--r--keystone-moon/keystone/contrib/revoke/model.py120
-rw-r--r--keystone-moon/keystone/contrib/s3/core.py15
-rw-r--r--keystone-moon/keystone/controllers.py8
-rw-r--r--keystone-moon/keystone/credential/core.py4
-rw-r--r--keystone-moon/keystone/endpoint_policy/__init__.py14
-rw-r--r--keystone-moon/keystone/endpoint_policy/backends/__init__.py0
-rw-r--r--keystone-moon/keystone/endpoint_policy/backends/sql.py140
-rw-r--r--keystone-moon/keystone/endpoint_policy/controllers.py166
-rw-r--r--keystone-moon/keystone/endpoint_policy/core.py433
-rw-r--r--keystone-moon/keystone/endpoint_policy/routers.py85
-rw-r--r--keystone-moon/keystone/exception.py23
-rw-r--r--keystone-moon/keystone/identity/backends/ldap.py19
-rw-r--r--keystone-moon/keystone/identity/backends/sql.py15
-rw-r--r--keystone-moon/keystone/identity/controllers.py10
-rw-r--r--keystone-moon/keystone/identity/core.py119
-rw-r--r--keystone-moon/keystone/identity/generator.py3
-rw-r--r--keystone-moon/keystone/identity/schema.py67
-rw-r--r--keystone-moon/keystone/locale/de/LC_MESSAGES/keystone-log-critical.po9
-rw-r--r--keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-critical.po9
-rw-r--r--keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-error.po149
-rw-r--r--keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone.po1529
-rw-r--r--keystone-moon/keystone/locale/es/LC_MESSAGES/keystone-log-critical.po11
-rw-r--r--keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-critical.po9
-rw-r--r--keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-error.po157
-rw-r--r--keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-info.po191
-rw-r--r--keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-warning.po274
-rw-r--r--keystone-moon/keystone/locale/hu/LC_MESSAGES/keystone-log-critical.po9
-rw-r--r--keystone-moon/keystone/locale/it/LC_MESSAGES/keystone-log-critical.po11
-rw-r--r--keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone-log-critical.po9
-rw-r--r--keystone-moon/keystone/locale/keystone-log-critical.pot10
-rw-r--r--keystone-moon/keystone/locale/keystone-log-error.pot68
-rw-r--r--keystone-moon/keystone/locale/keystone-log-info.pot114
-rw-r--r--keystone-moon/keystone/locale/keystone-log-warning.pot155
-rw-r--r--keystone-moon/keystone/locale/keystone.pot860
-rw-r--r--keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-critical.po9
-rw-r--r--keystone-moon/keystone/locale/pl_PL/LC_MESSAGES/keystone-log-critical.po9
-rw-r--r--keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-critical.po9
-rw-r--r--keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-error.po153
-rw-r--r--keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone.po1522
-rw-r--r--keystone-moon/keystone/locale/ru/LC_MESSAGES/keystone-log-critical.po16
-rw-r--r--keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-critical.po24
-rw-r--r--keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-error.po163
-rw-r--r--keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-info.po130
-rw-r--r--keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-warning.po249
-rw-r--r--keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone.po1288
-rw-r--r--keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-critical.po9
-rw-r--r--keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-error.po169
-rw-r--r--keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-info.po185
-rw-r--r--keystone-moon/keystone/locale/zh_TW/LC_MESSAGES/keystone-log-critical.po11
-rw-r--r--keystone-moon/keystone/middleware/core.py40
-rw-r--r--keystone-moon/keystone/models/token_model.py14
-rw-r--r--keystone-moon/keystone/notifications.py88
-rw-r--r--keystone-moon/keystone/policy/core.py3
-rw-r--r--keystone-moon/keystone/resource/backends/ldap.py22
-rw-r--r--keystone-moon/keystone/resource/backends/sql.py14
-rw-r--r--keystone-moon/keystone/resource/controllers.py43
-rw-r--r--keystone-moon/keystone/resource/core.py107
-rw-r--r--keystone-moon/keystone/resource/schema.py1
-rw-r--r--keystone-moon/keystone/server/backends.py64
-rw-r--r--keystone-moon/keystone/server/common.py2
-rw-r--r--keystone-moon/keystone/server/eventlet.py8
-rw-r--r--keystone-moon/keystone/server/wsgi.py8
-rw-r--r--keystone-moon/keystone/service.py19
-rw-r--r--keystone-moon/keystone/tests/functional/__init__.py0
-rw-r--r--keystone-moon/keystone/tests/functional/shared/__init__.py0
-rw-r--r--keystone-moon/keystone/tests/functional/shared/test_running.py50
-rw-r--r--keystone-moon/keystone/tests/hacking/__init__.py0
-rw-r--r--keystone-moon/keystone/tests/hacking/checks.py434
-rw-r--r--keystone-moon/keystone/tests/moon/unit/__init__.py10
-rw-r--r--keystone-moon/keystone/tests/moon/unit/test_unit_core_configuration.py27
-rw-r--r--keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_admin.py402
-rw-r--r--keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_authz.py515
-rw-r--r--keystone-moon/keystone/tests/moon/unit/test_unit_core_log.py58
-rw-r--r--keystone-moon/keystone/tests/moon/unit/test_unit_core_tenant.py181
-rw-r--r--keystone-moon/keystone/tests/unit/__init__.py8
-rw-r--r--keystone-moon/keystone/tests/unit/auth/__init__.py0
-rw-r--r--keystone-moon/keystone/tests/unit/auth/test_controllers.py98
-rw-r--r--keystone-moon/keystone/tests/unit/backend/core_ldap.py29
-rw-r--r--keystone-moon/keystone/tests/unit/backend/domain_config/core.py28
-rw-r--r--keystone-moon/keystone/tests/unit/catalog/test_core.py18
-rw-r--r--keystone-moon/keystone/tests/unit/common/test_connection_pool.py16
-rw-r--r--keystone-moon/keystone/tests/unit/common/test_injection.py57
-rw-r--r--keystone-moon/keystone/tests/unit/common/test_ldap.py88
-rw-r--r--keystone-moon/keystone/tests/unit/common/test_notifications.py180
-rw-r--r--keystone-moon/keystone/tests/unit/common/test_utils.py2
-rw-r--r--keystone-moon/keystone/tests/unit/config_files/backend_ldap_sql.conf2
-rw-r--r--keystone-moon/keystone/tests/unit/config_files/backend_multi_ldap_sql.conf2
-rw-r--r--keystone-moon/keystone/tests/unit/config_files/backend_mysql.conf2
-rw-r--r--keystone-moon/keystone/tests/unit/config_files/backend_sql.conf2
-rw-r--r--keystone-moon/keystone/tests/unit/config_files/domain_configs_default_ldap_one_sql/keystone.domain1.conf2
-rw-r--r--keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.Default.conf2
-rw-r--r--keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain1.conf2
-rw-r--r--keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain2.conf2
-rw-r--r--keystone-moon/keystone/tests/unit/config_files/domain_configs_one_extra_sql/keystone.domain2.conf2
-rw-r--r--keystone-moon/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.Default.conf2
-rw-r--r--keystone-moon/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.domain1.conf2
-rw-r--r--keystone-moon/keystone/tests/unit/config_files/test_auth_plugin.conf3
-rw-r--r--keystone-moon/keystone/tests/unit/contrib/__init__.py0
-rw-r--r--keystone-moon/keystone/tests/unit/contrib/federation/__init__.py0
-rw-r--r--keystone-moon/keystone/tests/unit/contrib/federation/test_utils.py611
-rw-r--r--keystone-moon/keystone/tests/unit/core.py154
-rw-r--r--keystone-moon/keystone/tests/unit/default_fixtures.py4
-rw-r--r--keystone-moon/keystone/tests/unit/fakeldap.py43
-rw-r--r--keystone-moon/keystone/tests/unit/filtering.py31
-rw-r--r--keystone-moon/keystone/tests/unit/identity/test_core.py61
-rw-r--r--keystone-moon/keystone/tests/unit/ksfixtures/database.py29
-rw-r--r--keystone-moon/keystone/tests/unit/ksfixtures/hacking.py88
-rw-r--r--keystone-moon/keystone/tests/unit/ksfixtures/key_repository.py6
-rw-r--r--keystone-moon/keystone/tests/unit/ksfixtures/ldapdb.py36
-rw-r--r--keystone-moon/keystone/tests/unit/mapping_fixtures.py11
-rw-r--r--keystone-moon/keystone/tests/unit/rest.py7
-rw-r--r--keystone-moon/keystone/tests/unit/saml2/signed_saml2_assertion.xml12
-rw-r--r--keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py223
-rw-r--r--keystone-moon/keystone/tests/unit/test_auth.py115
-rw-r--r--keystone-moon/keystone/tests/unit/test_auth_plugin.py52
-rw-r--r--keystone-moon/keystone/tests/unit/test_backend.py533
-rw-r--r--keystone-moon/keystone/tests/unit/test_backend_endpoint_policy.py1
-rw-r--r--keystone-moon/keystone/tests/unit/test_backend_endpoint_policy_sql.py3
-rw-r--r--keystone-moon/keystone/tests/unit/test_backend_federation_sql.py7
-rw-r--r--keystone-moon/keystone/tests/unit/test_backend_kvs.py35
-rw-r--r--keystone-moon/keystone/tests/unit/test_backend_ldap.py572
-rw-r--r--keystone-moon/keystone/tests/unit/test_backend_ldap_pool.py4
-rw-r--r--keystone-moon/keystone/tests/unit/test_backend_rules.py4
-rw-r--r--keystone-moon/keystone/tests/unit/test_backend_sql.py150
-rw-r--r--keystone-moon/keystone/tests/unit/test_backend_templated.py124
-rw-r--r--keystone-moon/keystone/tests/unit/test_cache.py8
-rw-r--r--keystone-moon/keystone/tests/unit/test_cache_backend_mongo.py9
-rw-r--r--keystone-moon/keystone/tests/unit/test_catalog.py98
-rw-r--r--keystone-moon/keystone/tests/unit/test_cert_setup.py4
-rw-r--r--keystone-moon/keystone/tests/unit/test_cli.py14
-rw-r--r--keystone-moon/keystone/tests/unit/test_config.py6
-rw-r--r--keystone-moon/keystone/tests/unit/test_contrib_ec2.py208
-rw-r--r--keystone-moon/keystone/tests/unit/test_exception.py12
-rw-r--r--keystone-moon/keystone/tests/unit/test_hacking_checks.py16
-rw-r--r--keystone-moon/keystone/tests/unit/test_kvs.py19
-rw-r--r--keystone-moon/keystone/tests/unit/test_ldap_livetest.py22
-rw-r--r--keystone-moon/keystone/tests/unit/test_ldap_pool_livetest.py10
-rw-r--r--keystone-moon/keystone/tests/unit/test_ldap_tls_livetest.py7
-rw-r--r--keystone-moon/keystone/tests/unit/test_policy.py45
-rw-r--r--keystone-moon/keystone/tests/unit/test_revoke.py21
-rw-r--r--keystone-moon/keystone/tests/unit/test_sql_migrate_extensions.py108
-rw-r--r--keystone-moon/keystone/tests/unit/test_sql_upgrade.py403
-rw-r--r--keystone-moon/keystone/tests/unit/test_ssl.py10
-rw-r--r--keystone-moon/keystone/tests/unit/test_token_provider.py35
-rw-r--r--keystone-moon/keystone/tests/unit/test_v2.py53
-rw-r--r--keystone-moon/keystone/tests/unit/test_v2_controller.py48
-rw-r--r--keystone-moon/keystone/tests/unit/test_v2_keystoneclient.py331
-rw-r--r--keystone-moon/keystone/tests/unit/test_v3.py197
-rw-r--r--keystone-moon/keystone/tests/unit/test_v3_assignment.py739
-rw-r--r--keystone-moon/keystone/tests/unit/test_v3_auth.py843
-rw-r--r--keystone-moon/keystone/tests/unit/test_v3_catalog.py242
-rw-r--r--keystone-moon/keystone/tests/unit/test_v3_controller.py1
-rw-r--r--keystone-moon/keystone/tests/unit/test_v3_credential.py10
-rw-r--r--keystone-moon/keystone/tests/unit/test_v3_endpoint_policy.py10
-rw-r--r--keystone-moon/keystone/tests/unit/test_v3_federation.py1450
-rw-r--r--keystone-moon/keystone/tests/unit/test_v3_filters.py10
-rw-r--r--keystone-moon/keystone/tests/unit/test_v3_identity.py58
-rw-r--r--keystone-moon/keystone/tests/unit/test_v3_oauth1.py10
-rw-r--r--keystone-moon/keystone/tests/unit/test_v3_os_revoke.py13
-rw-r--r--keystone-moon/keystone/tests/unit/test_v3_protection.py186
-rw-r--r--keystone-moon/keystone/tests/unit/test_validation.py340
-rw-r--r--keystone-moon/keystone/tests/unit/test_versions.py64
-rw-r--r--keystone-moon/keystone/tests/unit/test_wsgi.py79
-rw-r--r--keystone-moon/keystone/tests/unit/tests/test_core.py17
-rw-r--r--keystone-moon/keystone/tests/unit/token/test_fernet_provider.py370
-rw-r--r--keystone-moon/keystone/tests/unit/token/test_pki_provider.py26
-rw-r--r--keystone-moon/keystone/tests/unit/token/test_pkiz_provider.py26
-rw-r--r--keystone-moon/keystone/tests/unit/token/test_provider.py7
-rw-r--r--keystone-moon/keystone/tests/unit/token/test_token_model.py6
-rw-r--r--keystone-moon/keystone/tests/unit/token/test_uuid_provider.py26
-rw-r--r--keystone-moon/keystone/token/controllers.py20
-rw-r--r--keystone-moon/keystone/token/persistence/__init__.py2
-rw-r--r--keystone-moon/keystone/token/persistence/backends/kvs.py13
-rw-r--r--keystone-moon/keystone/token/persistence/backends/sql.py4
-rw-r--r--keystone-moon/keystone/token/persistence/core.py28
-rw-r--r--keystone-moon/keystone/token/provider.py25
-rw-r--r--keystone-moon/keystone/token/providers/common.py157
-rw-r--r--keystone-moon/keystone/token/providers/fernet/core.py229
-rw-r--r--keystone-moon/keystone/token/providers/fernet/token_formatters.py188
-rw-r--r--keystone-moon/keystone/token/providers/fernet/utils.py58
-rw-r--r--keystone-moon/keystone/token/providers/pki.py8
-rw-r--r--keystone-moon/keystone/token/providers/pkiz.py8
-rw-r--r--keystone-moon/keystone/token/providers/uuid.py8
-rw-r--r--keystone-moon/keystone/token/utils.py27
-rw-r--r--keystone-moon/keystone/trust/backends/sql.py7
-rw-r--r--keystone-moon/keystone/trust/controllers.py39
-rw-r--r--keystone-moon/keystone/trust/core.py28
-rw-r--r--keystone-moon/keystone/trust/schema.py7
-rw-r--r--keystone-moon/rally-jobs/README.rst5
-rw-r--r--keystone-moon/rally-jobs/keystone.yaml167
-rw-r--r--keystone-moon/requirements.txt48
-rw-r--r--keystone-moon/setup.cfg152
-rw-r--r--keystone-moon/setup.py3
-rw-r--r--keystone-moon/test-requirements.txt34
-rwxr-xr-x[-rw-r--r--]keystone-moon/tools/pretty_tox.sh0
-rwxr-xr-xkeystone-moon/tools/sample_data.sh160
-rw-r--r--keystone-moon/tox.ini108
-rw-r--r--moonclient/moonclient/tests.py1
-rw-r--r--moonclient/moonclient/tests/functional_tests.json0
368 files changed, 20012 insertions, 12372 deletions
diff --git a/keystone-moon/.coveragerc b/keystone-moon/.coveragerc
new file mode 100644
index 00000000..de2b16c5
--- /dev/null
+++ b/keystone-moon/.coveragerc
@@ -0,0 +1,7 @@
+[run]
+branch = True
+source = keystone
+omit = keystone/tests/*,keystone/openstack/*
+
+[report]
+ignore-errors = True
diff --git a/keystone-moon/.mailmap b/keystone-moon/.mailmap
new file mode 100644
index 00000000..ed8b7759
--- /dev/null
+++ b/keystone-moon/.mailmap
@@ -0,0 +1,28 @@
+# Format is:
+# <preferred e-mail> <other e-mail 1>
+# <preferred e-mail> <other e-mail 2>
+<adam.gandelman@canonical.com> <adamg@canonical.com>
+<bknudson@us.ibm.com> <blk@acm.org>
+<brian.waldon@rackspace.com> <bcwaldon@gmail.com>
+<brian.lamar@rackspace.com> <brian.lamar@gmail.com>
+<dolph.mathews@gmail.com> <dolph.mathews@rackspace.com>
+<jeblair@hp.com> <james.blair@rackspace.com>
+<jeblair@hp.com> <corvus@gnu.org>
+<jaypipes@gmail.com> <jpipes@uberbox.gateway.2wire.net>
+Joe Gordon <joe.gordon0@gmail.com> <jogo@cloudscaling.com>
+<john.eo@rackspace.com> <john.eo@gmail.com>
+<khaled.hussein@rackspace.com> <khaled.hussein@gmail.com>
+<liem_m_nguyen@hp.com> <liem.m.nguyen@gmail.com>
+<liem_m_nguyen@hp.com> <liem.m.nguyen@hp.com>
+<morgan.fainberg@gmail.com> <m@metacloud.com>
+<rjuvvadi@hcl.com> <ramana@venus.lekha.org>
+<rjuvvadi@hcl.com> <rrjuvvadi@gmail.com>
+<xtoddx@gmail.com> <todd@rubidine.com>
+<github@highbridgellc.com> <gihub@highbridgellc.com>
+<github@highbridgellc.com> <ziad.sawalha@rackspace.com>
+Sirish Bitra <sirish.bitra@gmail.com> sirish.bitra <sirish.bitra@gmail.com>
+Sirish Bitra <sirish.bitra@gmail.com> sirishbitra <sirish.bitra@gmail.com>
+Sirish Bitra <sirish.bitra@gmail.com> root <root@bsirish.(none)>
+Zhongyue Luo <zhongyue.nah@intel.com> <lzyeval@gmail.com>
+Chmouel Boudjnah <chmouel@enovance.com> <chmouel@chmouel.com>
+Zhenguo Niu <zhenguo@unitedstack.com> <Niu.ZGlinux@gmail.com>
diff --git a/keystone-moon/.testr.conf b/keystone-moon/.testr.conf
new file mode 100644
index 00000000..74698954
--- /dev/null
+++ b/keystone-moon/.testr.conf
@@ -0,0 +1,19 @@
+[DEFAULT]
+test_command=
+ OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
+ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
+ OS_LOG_CAPTURE=${OS_LOG_CAPTURE:-1} \
+ ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./keystone/tests/unit} $LISTOPT $IDOPTION
+
+test_id_option=--load-list $IDFILE
+test_list_option=--list
+group_regex=.*(test_cert_setup)
+
+
+# NOTE(morganfainberg): If single-worker mode is wanted (e.g. for live tests)
+# the environment variable ``TEST_RUN_CONCURRENCY`` should be set to ``1``. If
+# a non-default (1 worker per available core) concurrency is desired, set
+# environment variable ``TEST_RUN_CONCURRENCY`` to the desired number of
+# workers.
+test_run_concurrency=echo ${TEST_RUN_CONCURRENCY:-0}
+
diff --git a/keystone-moon/CONTRIBUTING.rst b/keystone-moon/CONTRIBUTING.rst
index fc3d3663..b6d701b5 100644
--- a/keystone-moon/CONTRIBUTING.rst
+++ b/keystone-moon/CONTRIBUTING.rst
@@ -1,13 +1,14 @@
-If you would like to contribute to the development of OpenStack,
-you must follow the steps documented at:
+If you would like to contribute to the development of OpenStack, you must
+follow the steps in this page:
- http://wiki.openstack.org/HowToContribute#If_you.27re_a_developer
+ http://docs.openstack.org/infra/manual/developers.html
-Once those steps have been completed, changes to OpenStack
-should be submitted for review via the Gerrit tool, following
-the workflow documented at:
+If you already have a good understanding of how the system works and your
+OpenStack accounts are set up, you can skip to the development workflow
+section of this documentation to learn how changes to OpenStack should be
+submitted for review via the Gerrit tool:
- http://wiki.openstack.org/GerritWorkflow
+ http://docs.openstack.org/infra/manual/developers.html#development-workflow
Pull requests submitted through GitHub will be ignored.
diff --git a/keystone-moon/MANIFEST.in b/keystone-moon/MANIFEST.in
index fa69c1aa..39db99e6 100644
--- a/keystone-moon/MANIFEST.in
+++ b/keystone-moon/MANIFEST.in
@@ -1,16 +1,14 @@
include AUTHORS
include babel.cfg
include ChangeLog
-include CONTRIBUTING.txt
+include CONTRIBUTING.rst
include LICENSE
include HACKING.rst
include README.rst
include openstack-common.conf
-include run_tests.py
include run_tests.sh
include setup.cfg
include setup.py
-include TODO
include tox.ini
include etc/*
include httpd/*
diff --git a/keystone-moon/README.rst b/keystone-moon/README.rst
index 853873ce..65ba7ccb 100644
--- a/keystone-moon/README.rst
+++ b/keystone-moon/README.rst
@@ -10,7 +10,7 @@ such as LDAP.
Developer documentation, the source of which is in ``doc/source/``, is
published at:
- http://keystone.openstack.org/
+ http://docs.openstack.org/developer/keystone/
The API specification and documentation are available at:
@@ -18,7 +18,7 @@ The API specification and documentation are available at:
The canonical client library is available at:
- https://github.com/openstack/python-keystoneclient
+ https://git.openstack.org/cgit/openstack/python-keystoneclient
Documentation for cloud administrators is available at:
@@ -26,7 +26,7 @@ Documentation for cloud administrators is available at:
The source of documentation for cloud administrators is available at:
- https://github.com/openstack/openstack-manuals
+ https://git.openstack.org/cgit/openstack/openstack-manuals
Information about our team meeting is available at:
diff --git a/keystone-moon/bandit.yaml b/keystone-moon/bandit.yaml
new file mode 100644
index 00000000..89d2551d
--- /dev/null
+++ b/keystone-moon/bandit.yaml
@@ -0,0 +1,134 @@
+# optional: after how many files to update progress
+#show_progress_every: 100
+
+# optional: plugins directory name
+#plugins_dir: 'plugins'
+
+# optional: plugins discovery name pattern
+plugin_name_pattern: '*.py'
+
+# optional: terminal escape sequences to display colors
+#output_colors:
+# DEFAULT: '\033[0m'
+# HEADER: '\033[95m'
+# INFO: '\033[94m'
+# WARN: '\033[93m'
+# ERROR: '\033[91m'
+
+# optional: log format string
+#log_format: "[%(module)s]\t%(levelname)s\t%(message)s"
+
+# globs of files which should be analyzed
+include:
+ - '*.py'
+ - '*.pyw'
+
+# a list of strings, which if found in the path will cause files to be excluded
+# for example /tests/ - to remove all files in the tests directory
+exclude_dirs:
+ - '/tests/'
+
+profiles:
+ keystone_conservative:
+ include:
+ - blacklist_calls
+ - blacklist_imports
+ - request_with_no_cert_validation
+ - exec_used
+ - set_bad_file_permissions
+ - subprocess_popen_with_shell_equals_true
+ - linux_commands_wildcard_injection
+ - ssl_with_bad_version
+
+
+ keystone_verbose:
+ include:
+ - blacklist_calls
+ - blacklist_imports
+ - request_with_no_cert_validation
+ - exec_used
+ - set_bad_file_permissions
+ - hardcoded_tmp_directory
+ - subprocess_popen_with_shell_equals_true
+ - any_other_function_with_shell_equals_true
+ - linux_commands_wildcard_injection
+ - ssl_with_bad_version
+ - ssl_with_bad_defaults
+
+blacklist_calls:
+ bad_name_sets:
+ - pickle:
+ qualnames: [pickle.loads, pickle.load, pickle.Unpickler,
+ cPickle.loads, cPickle.load, cPickle.Unpickler]
+ message: "Pickle library appears to be in use, possible security issue."
+ - marshal:
+ qualnames: [marshal.load, marshal.loads]
+ message: "Deserialization with the marshal module is possibly dangerous."
+ - md5:
+ qualnames: [hashlib.md5]
+ message: "Use of insecure MD5 hash function."
+ - mktemp_q:
+ qualnames: [tempfile.mktemp]
+ message: "Use of insecure and deprecated function (mktemp)."
+ - eval:
+ qualnames: [eval]
+ message: "Use of possibly insecure function - consider using safer ast.literal_eval."
+ - mark_safe:
+ names: [mark_safe]
+ message: "Use of mark_safe() may expose cross-site scripting vulnerabilities and should be reviewed."
+ - httpsconnection:
+ qualnames: [httplib.HTTPSConnection]
+ message: "Use of HTTPSConnection does not provide security, see https://wiki.openstack.org/wiki/OSSN/OSSN-0033"
+ - yaml_load:
+ qualnames: [yaml.load]
+ message: "Use of unsafe yaml load. Allows instantiation of arbitrary objects. Consider yaml.safe_load()."
+ - urllib_urlopen:
+ qualnames: [urllib.urlopen, urllib.urlretrieve, urllib.URLopener, urllib.FancyURLopener, urllib2.urlopen, urllib2.Request]
+ message: "Audit url open for permitted schemes. Allowing use of file:/ or custom schemes is often unexpected."
+
+shell_injection:
+ # Start a process using the subprocess module, or one of its wrappers.
+ subprocess: [subprocess.Popen, subprocess.call, subprocess.check_call,
+ subprocess.check_output, utils.execute, utils.execute_with_timeout]
+ # Start a process with a function vulnerable to shell injection.
+ shell: [os.system, os.popen, os.popen2, os.popen3, os.popen4,
+ popen2.popen2, popen2.popen3, popen2.popen4, popen2.Popen3,
+ popen2.Popen4, commands.getoutput, commands.getstatusoutput]
+ # Start a process with a function that is not vulnerable to shell injection.
+  no_shell: [os.execl, os.execle, os.execlp, os.execlpe, os.execv, os.execve,
+ os.execvp, os.execvpe, os.spawnl, os.spawnle, os.spawnlp,
+ os.spawnlpe, os.spawnv, os.spawnve, os.spawnvp, os.spawnvpe,
+ os.startfile]
+
+blacklist_imports:
+ bad_import_sets:
+ - telnet:
+ imports: [telnetlib]
+ level: ERROR
+ message: "Telnet is considered insecure. Use SSH or some other encrypted protocol."
+
+hardcoded_password:
+ word_list: "wordlist/default-passwords"
+
+ssl_with_bad_version:
+ bad_protocol_versions:
+ - 'PROTOCOL_SSLv2'
+ - 'SSLv2_METHOD'
+ - 'SSLv23_METHOD'
+ - 'PROTOCOL_SSLv3' # strict option
+ - 'PROTOCOL_TLSv1' # strict option
+ - 'SSLv3_METHOD' # strict option
+ - 'TLSv1_METHOD' # strict option
+
+password_config_option_not_marked_secret:
+ function_names:
+ - oslo.config.cfg.StrOpt
+ - oslo_config.cfg.StrOpt
+
+execute_with_run_as_root_equals_true:
+ function_names:
+ - ceilometer.utils.execute
+ - cinder.utils.execute
+ - neutron.agent.linux.utils.execute
+ - nova.utils.execute
+ - nova.utils.trycmd
diff --git a/keystone-moon/config-generator/keystone.conf b/keystone-moon/config-generator/keystone.conf
index 920c650d..0e00d489 100644
--- a/keystone-moon/config-generator/keystone.conf
+++ b/keystone-moon/config-generator/keystone.conf
@@ -3,12 +3,12 @@ output_file = etc/keystone.conf.sample
wrap_width = 79
namespace = keystone
namespace = keystone.notifications
-namespace = keystone.openstack.common.eventlet_backdoor
namespace = oslo.log
namespace = oslo.messaging
namespace = oslo.policy
namespace = oslo.db
namespace = oslo.middleware
+namespace = oslo.service.service
# We don't use oslo.concurrency config options in
# keystone now, just in case it slips through unnoticed.
#namespace = oslo.concurrency
diff --git a/keystone-moon/doc/source/apache-httpd.rst b/keystone-moon/doc/source/apache-httpd.rst
index c075512f..91eb7011 100644
--- a/keystone-moon/doc/source/apache-httpd.rst
+++ b/keystone-moon/doc/source/apache-httpd.rst
@@ -31,38 +31,38 @@ Running Keystone in HTTPD
Files
-----
-Copy the file httpd/wsgi-keystone.conf to the appropriate location for your
-Apache server, most likely::
+Copy the ``httpd/wsgi-keystone.conf`` sample configuration file to the
+appropriate location for your Apache server::
- /etc/httpd/conf.d/wsgi-keystone.conf
+ /etc/$APACHE_DIR/conf.d/sites-available/wsgi-keystone.conf
-Update this file to match your system configuration (for example, some
-distributions put httpd logs in the ``apache2`` directory and some in the
-``httpd`` directory; also, enable TLS).
+Where ``$APACHE_DIR`` is ``httpd`` on Fedora-based systems and ``apache2`` on
+Debian/Ubuntu systems.
-Create the directory ``/var/www/cgi-bin/keystone/``. You can either hardlink or
-softlink the files ``main`` and ``admin`` to the file ``keystone.py`` in this
-directory. For a distribution appropriate place, it should probably be copied
-to::
+Update the file to match your system configuration. Note the following:
- /usr/share/openstack/keystone/httpd/keystone.py
+* Make sure the correct log directory is used. Some distributions put httpd
+ server logs in the ``apache2`` directory and some in the ``httpd`` directory.
+* Enable TLS by supplying the correct certificates.
Keystone's primary configuration file (``etc/keystone.conf``) and the
PasteDeploy configuration file (``etc/keystone-paste.ini``) must be readable to
HTTPD in one of the default locations described in :doc:`configuration`.
-SELinux
--------
+Enable the site by creating a symlink from ``sites-enabled`` to the file in
+``sites-available``::
-If you are running with SELinux enabled (and you should be) make sure that the
-file has the appropriate SELinux context to access the linked file. If you
-have the file in /var/www/cgi-bin, you can do this by running:
+  ln -s /etc/$APACHE_DIR/sites-available/wsgi-keystone.conf /etc/$APACHE_DIR/sites-enabled/
-.. code-block:: bash
+Restart Apache to have it start serving keystone.
- $ sudo restorecon /var/www/cgi-bin
-Putting it somewhere else requires you set up your SELinux policy accordingly.
+Access Control
+--------------
+
+If you are running with Linux kernel security module enabled (for example
+SELinux or AppArmor) make sure that the file has the appropriate context to
+access the linked file.
Keystone Configuration
----------------------
@@ -74,20 +74,20 @@ between processes.
.. WARNING::
- The KVS (``keystone.token.persistence.backends.kvs.Token``) token
- persistence driver cannot be shared between processes so must not be used
- when running keystone under HTTPD (the tokens will not be shared between
- the processes of the server and validation will fail).
+ The KVS (``kvs``) token persistence driver cannot be shared between
+ processes so must not be used when running keystone under HTTPD (the tokens
+ will not be shared between the processes of the server and validation will
+ fail).
For SQL, in ``/etc/keystone/keystone.conf`` set::
[token]
- driver = keystone.token.persistence.backends.sql.Token
+ driver = sql
For memcached, in ``/etc/keystone/keystone.conf`` set::
[token]
- driver = keystone.token.persistence.backends.memcache.Token
+ driver = memcache
All servers that are storing tokens need a shared backend. This means that
either all servers use the same database server or use a common memcached pool.
diff --git a/keystone-moon/doc/source/api_curl_examples.rst b/keystone-moon/doc/source/api_curl_examples.rst
index a4b31553..c88c7fd0 100644
--- a/keystone-moon/doc/source/api_curl_examples.rst
+++ b/keystone-moon/doc/source/api_curl_examples.rst
@@ -72,8 +72,8 @@ Example response::
"id": "ef303187fc8d41668f25199c298396a5"}], "type": "identity", "id":
"bd73972c0e14fb69bae8ff76e112a90", "name": "keystone"}], "extras": {},
"user": {"domain": {"id": "default", "name": "Default"}, "id":
- "3ec3164f750146be97f21559ee4d9c51", "name": "admin"}, "issued_at":
- "201406-10T20:55:16.806027Z"}}
+ "3ec3164f750146be97f21559ee4d9c51", "name": "admin"}, "audit_ids":
+  ["yRt0UrxJSs6-WYJgwEMMmg"], "issued_at": "2014-06-10T20:55:16.806027Z"}}
Project-scoped
@@ -116,11 +116,11 @@ Example response::
Content-Length: 960
Date: Tue, 10 Jun 2014 20:40:14 GMT
- {"token": {"methods": ["password"], "roles": [{"id":
- "c703057be878458588961ce9a0ce686b", "name": "admin"}], "expires_at":
- "2014-06-10T21:40:14.360795Z", "project": {"domain": {"id": "default",
- "name": "Default"}, "id": "3d4c2c82bd5948f0bcab0cf3a7c9b48c", "name":
- "demo"}, "catalog": [{"endpoints": [{"url":
+ {"token": {"audit_ids": ["ECwrVNWbSCqmEgPnu0YCRw"], "methods": ["password"],
+ "roles": [{"id": "c703057be878458588961ce9a0ce686b", "name": "admin"}],
+ "expires_at": "2014-06-10T21:40:14.360795Z", "project": {"domain": {"id":
+ "default", "name": "Default"}, "id": "3d4c2c82bd5948f0bcab0cf3a7c9b48c",
+ "name": "demo"}, "catalog": [{"endpoints": [{"url":
"http://localhost:35357/v2.0", "region": "RegionOne", "interface": "admin",
"id": "29beb2f1567642eb810b042b6719ea88"}, {"url":
"http://localhost:5000/v2.0", "region": "RegionOne", "interface":
@@ -184,7 +184,8 @@ Example response::
"id": "ef303187fc8d41668f25199c298396a5"}], "type": "identity", "id":
"bd7397d2c0e14fb69bae8ff76e112a90", "name": "keystone"}], "extras": {},
"user": {"domain": {"id": "default", "name": "Default"}, "id":
- "3ec3164f750146be97f21559ee4d9c51", "name": "admin"}, "issued_at":
+ "3ec3164f750146be97f21559ee4d9c51", "name": "admin"},
+ "audit_ids": ["Xpa6Uyn-T9S6mTREudUH3w"], "issued_at":
"2014-06-10T20:52:58.852194Z"}}
@@ -219,23 +220,11 @@ Example response::
Content-Length: 1034
Date: Tue, 10 Jun 2014 21:00:05 GMT
- {"token": {"methods": ["token", "password"], "roles": [{"id":
- "9fe2ff9ee4384b1894a90878d3e92bab", "name": "_member_"}, {"id":
- "c703057be878458588961ce9a0ce686b", "name": "admin"}], "expires_at":
- "2014-06-10T21:55:16.806001Z", "project": {"domain": {"id": "default",
- "name": "Default"}, "id": "8538a3f13f9541b28c2620eb19065e45", "name":
- "admin"}, "catalog": [{"endpoints": [{"url": "http://localhost:35357/v2.0",
- "region": "RegionOne", "interface": "admin", "id":
- "29beb2f1567642eb810b042b6719ea88"}, {"url": "http://localhost:5000/v2.0",
- "region": "RegionOne", "interface": "internal", "id":
- "87057e3735d4415c97ae231b4841eb1c"}, {"url": "http://localhost:5000/v2.0",
- "region": "RegionOne", "interface": "public", "id":
- "ef303187fc8d41668f25199c298396a5"}], "type": "identity", "id":
- "bd7397d2c0e14fb69bae8ff76e112a90", "name": "keystone"}], "extras": {},
- "user": {"domain": {"id": "default", "name": "Default"}, "id":
- "3ec3164f750146be97f21559ee4d9c51", "name": "admin"}, "issued_at":
- "2014-06-10T21:00:05.548559Z"}}
-
+ {"token": {"methods": ["token", "password"], "expires_at":
+ "2015-05-28T07:43:44.808209Z", "extras": {}, "user": {"domain": {"id":
+ "default", "name": "Default"}, "id": "753867c25c3340ffad1abc22d488c31a",
+ "name": "admin"}, "audit_ids": ["ZE0OPSuzTmCXHo0eIOYltw",
+ "xxIQCkHOQOywL0oY6CTppQ"], "issued_at": "2015-05-28T07:19:23.763532Z"}}
.. note::
@@ -755,7 +744,53 @@ and supported media types:
$ curl http://0.0.0.0:35357
-or:
+.. code-block:: javascript
+
+ {
+ "versions": {
+ "values": [
+ {
+ "id": "v3.4",
+ "links": [
+ {
+ "href": "http://127.0.0.1:35357/v3/",
+ "rel": "self"
+ }
+ ],
+ "media-types": [
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.identity-v3+json"
+ }
+ ],
+ "status": "stable",
+ "updated": "2015-03-30T00:00:00Z"
+ },
+ {
+ "id": "v2.0",
+ "links": [
+ {
+ "href": "http://127.0.0.1:35357/v2.0/",
+ "rel": "self"
+ },
+ {
+ "href": "http://docs.openstack.org/",
+ "rel": "describedby",
+ "type": "text/html"
+ }
+ ],
+ "media-types": [
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.identity-v2.0+json"
+ }
+ ],
+ "status": "stable",
+ "updated": "2014-04-17T00:00:00Z"
+ }
+ ]
+ }
+ }
.. code-block:: bash
@@ -766,27 +801,27 @@ Returns:
.. code-block:: javascript
{
- "version":{
- "id":"v2.0",
- "status":"beta",
- "updated":"2011-11-19T00:00:00Z",
- "links":[
+ "version": {
+ "id": "v2.0",
+ "links": [
{
- "rel":"self",
- "href":"http://127.0.0.1:35357/v2.0/"
+ "href": "http://127.0.0.1:35357/v2.0/",
+ "rel": "self"
},
{
- "rel":"describedby",
- "type":"text/html",
- "href":"http://docs.openstack.org/"
- },
+ "href": "http://docs.openstack.org/",
+ "rel": "describedby",
+ "type": "text/html"
+ }
],
- "media-types":[
+ "media-types": [
{
- "base":"application/json",
- "type":"application/vnd.openstack.identity-v2.0+json"
+ "base": "application/json",
+ "type": "application/vnd.openstack.identity-v2.0+json"
}
- ]
+ ],
+ "status": "stable",
+ "updated": "2014-04-17T00:00:00Z"
}
}
diff --git a/keystone-moon/doc/source/community.rst b/keystone-moon/doc/source/community.rst
index e1df0b89..dfb0870f 100644
--- a/keystone-moon/doc/source/community.rst
+++ b/keystone-moon/doc/source/community.rst
@@ -65,10 +65,10 @@ Keystone on Launchpad
Launchpad is a code hosting that OpenStack is using to track bugs, feature
work, and releases of OpenStack. Like other OpenStack projects, Keystone source
-code is hosted on GitHub
+code is hosted on git.openstack.org
* `Keystone Project Page on Launchpad <http://launchpad.net/keystone>`_
-* `Keystone Source Repository on GitHub <http://github.com/openstack/keystone>`_
+* `Keystone Source Repository <https://git.openstack.org/cgit/openstack/keystone>`_
Within launchpad, we use
`blueprints <https://blueprints.launchpad.net/keystone>`_, to track feature
diff --git a/keystone-moon/doc/source/conf.py b/keystone-moon/doc/source/conf.py
index fe46f326..7cca2857 100644
--- a/keystone-moon/doc/source/conf.py
+++ b/keystone-moon/doc/source/conf.py
@@ -24,14 +24,8 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
-import sys
import os
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.insert(0, os.path.abspath('..')) # NOTE(dstanek): path for our
- # Sphinx extension
# NOTE(dstanek): adds _ to the builtins so keystone modules can be imported
__builtins__['_'] = str
@@ -49,10 +43,6 @@ extensions = ['sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'oslosphinx',
- # NOTE(dstanek): Uncomment the [pbr] section in setup.cfg and
- # remove this Sphinx extension when
- # https://launchpad.net/bugs/1260495 is fixed.
- 'ext.apidoc',
]
todo_include_todos = True
diff --git a/keystone-moon/doc/source/configuration.rst b/keystone-moon/doc/source/configuration.rst
index e365f0ed..96491660 100644
--- a/keystone-moon/doc/source/configuration.rst
+++ b/keystone-moon/doc/source/configuration.rst
@@ -156,10 +156,6 @@ configuration file.
Domain-specific Drivers
-----------------------
-.. NOTE::
-
- This functionality is new in Juno.
-
Keystone supports the option (disabled by default) to specify identity driver
configurations on a domain by domain basis, allowing, for example, a specific
domain to have its own LDAP or SQL server. This is configured by specifying the
@@ -182,12 +178,74 @@ the primary configuration file for the specified domain only. Domains without a
specific configuration file will continue to use the options from the primary
configuration file.
+Keystone also supports the ability to store the domain-specific configuration
+options in the keystone SQL database, managed via the Identity API, as opposed
+to using domain-specific configuration files.
+
.. NOTE::
- It is important to notice that by enabling this configuration, the
- operations of listing all users and listing all groups are not supported,
- those calls will need that either a domain filter is specified or the usage
- of a domain scoped token.
+ The ability to store and manage configuration options via the Identity API
+ is new and experimental in Kilo.
+
+This capability (which is disabled by default) is enabled by specifying the
+following options in the main keystone configuration file:
+
+.. code-block:: ini
+
+ [identity]
+ domain_specific_drivers_enabled = true
+ domain_configurations_from_database = true
+
+Once enabled, any existing domain-specific configuration files in the
+configuration directory will be ignored and only those domain-specific
+configuration options specified via the Identity API will be used.
+
+Unlike the file-based method of specifying domain-specific configurations,
+options specified via the Identity API will become active without needing to
+restart the keystone server. For performance reasons, the current state of
+configuration options for a domain are cached in the keystone server, and in
+multi-process and multi-threaded keystone configurations, the new
+configuration options may not become active until the cache has timed out. The
+cache settings for domain config options can be adjusted in the general
+keystone configuration file (option ``cache_time`` in the ``domain_config``
+group).
+
+.. NOTE::
+
+ It is important to notice that when using either of these methods of
+ specifying domain-specific configuration options, the main keystone
+ configuration file is still maintained. Only those options that relate
+ to the Identity driver for users and groups (i.e. specifying whether the
+ driver for this domain is SQL or LDAP, and, if LDAP, the options that
+ define that connection) are supported in a domain-specific manner. Further,
+ when using the configuration options via the Identity API, the driver
+ option must be set to an LDAP driver (attempting to set it to an SQL driver
+ will generate an error when it is subsequently used).
+
+For existing installations that already use file-based domain-specific
+configurations who wish to migrate to the SQL-based approach, the
+``keystone-manage`` command can be used to upload all configuration files to
+the SQL database:
+
+.. code-block:: bash
+
+ $ keystone-manage domain_config_upload --all
+
+Once uploaded, these domain-configuration options will be visible via the
+Identity API as well as applied to the domain-specific drivers. It is also
+possible to upload individual domain-specific configuration files by
+specifying the domain name:
+
+.. code-block:: bash
+
+ $ keystone-manage domain_config_upload --domain-name DOMAINA
+
+.. NOTE::
+
+ It is important to notice that by enabling either of the domain-specific
+ configuration methods, the operations of listing all users and listing all
+ groups are not supported, those calls will need either a domain filter to
+ be specified or usage of a domain scoped token.
.. NOTE::
@@ -197,17 +255,21 @@ configuration file.
.. NOTE::
- To delete a domain that uses a domain specific backend, it's necessary to
- first disable it, remove its specific configuration file (i.e. its
- corresponding keystone.<domain_name>.conf) and then restart the Identity
- server.
+ When using the file-based domain-specific configuration method, to delete a
+ domain that uses a domain specific backend, it's necessary to first disable
+ it, remove its specific configuration file (i.e. its corresponding
+ keystone.<domain_name>.conf) and then restart the Identity server. When
+ managing configuration options via the Identity API, the domain can simply
+ be disabled and deleted via the Identity API; since any domain-specific
+ configuration options will automatically be removed.
.. NOTE::
- Although Keystone supports multiple LDAP backends via domain specific
- configuration files, it currently only supports one SQL backend. This could
- be either the default driver or a single domain-specific backend, perhaps
- for storing service users in a predominantly LDAP installation.
+ Although Keystone supports multiple LDAP backends via the above
+ domain-specific configuration methods, it currently only supports one SQL
+ backend. This could be either the default driver or a single
+ domain-specific backend, perhaps for storing service users in a
+ predominantly LDAP installation.
Due to the need for user and group IDs to be unique across an OpenStack
installation and for Keystone to be able to deduce which domain and backend to
@@ -274,8 +336,8 @@ wish to make use of other generator algorithms that have a different trade-off
of attributes. A different generator can be installed by configuring the
following property:
-* ``generator`` - identity mapping generator. Defaults to
- ``keystone.identity.generators.sha256.Generator``
+* ``generator`` - identity mapping generator. Defaults to ``sha256``
+ (implemented by :class:`keystone.identity.id_generators.sha256.Generator`)
.. WARNING::
@@ -309,7 +371,7 @@ How to Implement an Authentication Plugin
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
All authentication plugins must extend the
-``keystone.auth.core.AuthMethodHandler`` class and implement the
+:class:`keystone.auth.core.AuthMethodHandler` class and implement the
``authenticate()`` method. The ``authenticate()`` method expects the following
parameters.
@@ -332,7 +394,7 @@ return the payload in the form of a dictionary for the next authentication
step.
If authentication is unsuccessful, the ``authenticate()`` method must raise a
-``keystone.exception.Unauthorized`` exception.
+:class:`keystone.exception.Unauthorized` exception.
Simply add the new plugin name to the ``methods`` list along with your plugin
class configuration in the ``[auth]`` sections of the configuration file to
@@ -365,30 +427,28 @@ provides three non-test persistence backends. These can be set with the
The drivers Keystone provides are:
-* ``keystone.token.persistence.backends.memcache_pool.Token`` - The pooled
- memcached token persistence engine. This backend supports the concept of
- pooled memcache client object (allowing for the re-use of the client
- objects). This backend has a number of extra tunable options in the
- ``[memcache]`` section of the config.
+* ``memcache_pool`` - The pooled memcached token persistence engine. This
+ backend supports the concept of pooled memcache client object (allowing for
+ the re-use of the client objects). This backend has a number of extra tunable
+ options in the ``[memcache]`` section of the config. Implemented by
+ :class:`keystone.token.persistence.backends.memcache_pool.Token`
-* ``keystone.token.persistence.backends.sql.Token`` - The SQL-based (default)
- token persistence engine.
+* ``sql`` - The SQL-based (default) token persistence engine. Implemented by
+ :class:`keystone.token.persistence.backends.sql.Token`
-* ``keystone.token.persistence.backends.memcache.Token`` - The memcached based
- token persistence backend. This backend relies on ``dogpile.cache`` and
- stores the token data in a set of memcached servers. The servers URLs are
- specified in the ``[memcache]\servers`` configuration option in the Keystone
- config.
+* ``memcache`` - The memcached based token persistence backend. This backend
+ relies on ``dogpile.cache`` and stores the token data in a set of memcached
+ servers. The servers URLs are specified in the ``[memcache]\servers``
+ configuration option in the Keystone config. Implemented by
+ :class:`keystone.token.persistence.backends.memcache.Token`
.. WARNING::
- It is recommended you use the
- ``keystone.token.persistence.backends.memcache_pool.Token`` backend instead
- of ``keystone.token.persistence.backends.memcache.Token`` as the token
- persistence driver if you are deploying Keystone under eventlet instead of
- Apache + mod_wsgi. This recommendation is due to known issues with the use
- of ``thread.local`` under eventlet that can allow the leaking of memcache
- client objects and consumption of extra sockets.
+ It is recommended you use the ``memcache_pool`` backend instead of
+ ``memcache`` as the token persistence driver if you are deploying Keystone
+ under eventlet instead of Apache + mod_wsgi. This recommendation is due to
+ known issues with the use of ``thread.local`` under eventlet that can allow
+ the leaking of memcache client objects and consumption of extra sockets.
Token Provider
@@ -399,8 +459,8 @@ Keystone supports customizable token provider and it is specified in the
PKI token providers. However, users may register their own token provider by
configuring the following property.
-* ``provider`` - token provider driver. Defaults to
- ``keystone.token.providers.uuid.Provider``
+* ``provider`` - token provider driver. Defaults to ``uuid``. Implemented by
+ :class:`keystone.token.providers.uuid.Provider`
UUID, PKI, PKIZ, or Fernet?
@@ -749,7 +809,7 @@ following states:
deployment at all times). In a multi-node Keystone deployment this would
allow for the *staged* key to be replicated to all Keystone nodes before
being promoted to *primary* on a single node. This prevents the case where a
- *primary* key is created on one Keystone node and tokens encryted/signed with
+ *primary* key is created on one Keystone node and tokens encrypted/signed with
that new *primary* are rejected on another Keystone node because the new
*primary* doesn't exist there yet.
@@ -790,7 +850,7 @@ A dynamic database-backed driver fully supporting persistent configuration.
.. code-block:: ini
[catalog]
- driver = keystone.catalog.backends.sql.Catalog
+ driver = sql
.. NOTE::
@@ -805,7 +865,7 @@ To build your service catalog using this driver, see the built-in help:
$ openstack help endpoint create
You can also refer to `an example in Keystone (tools/sample_data.sh)
-<https://github.com/openstack/keystone/blob/master/tools/sample_data.sh>`_.
+<https://git.openstack.org/cgit/openstack/keystone/tree/tools/sample_data.sh>`_.
File-based Service Catalog (``templated.Catalog``)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -826,7 +886,7 @@ catalog will not change very much over time.
.. code-block:: ini
[catalog]
- driver = keystone.catalog.backends.templated.Catalog
+ driver = templated
template_file = /opt/stack/keystone/etc/default_catalog.templates
The value of ``template_file`` is expected to be an absolute path to your
@@ -835,7 +895,7 @@ Keystone, however you should create your own to reflect your deployment.
Another such example is `available in devstack
(files/default_catalog.templates)
-<https://github.com/openstack-dev/devstack/blob/master/files/default_catalog.templates>`_.
+<https://git.openstack.org/cgit/openstack-dev/devstack/tree/files/default_catalog.templates>`_.
Logging
-------
@@ -1121,10 +1181,10 @@ The following attributes are available
would ensure that the user object that is being deleted is in the same
domain as the token provided.
-Every target object has an `id` and a `name` available as `target.<object>.id`
-and `target.<object>.name`. Other attributes are retrieved from the database
-and vary between object types. Moreover, some database fields are filtered out
-(e.g. user passwords).
+Every target object (except token) has an `id` and a `name` available as
+`target.<object>.id` and `target.<object>.name`. Other attributes are retrieved
+from the database and vary between object types. Moreover, some database fields
+are filtered out (e.g. user passwords).
List of object attributes:
@@ -1158,6 +1218,10 @@ List of object attributes:
* target.project.id
* target.project.name
+* token
+ * target.token.user_id
+ * target.token.user.domain.id
+
The default policy.json file supplied provides a somewhat basic example of API
protection, and does not assume any particular use of domains. For multi-domain
configuration installations where, for example, a cloud provider wishes to
@@ -1186,7 +1250,7 @@ Ensure that your ``keystone.conf`` is configured to use a SQL driver:
.. code-block:: ini
[identity]
- driver = keystone.identity.backends.sql.Identity
+ driver = sql
You may also want to configure your ``[database]`` settings to better reflect
your environment:
@@ -1234,6 +1298,10 @@ through the normal REST API. At the moment, the following calls are supported:
* ``db_sync``: Sync the database.
* ``db_version``: Print the current migration version of the database.
+* ``domain_config_upload``: Upload domain configuration file.
+* ``fernet_rotate``: Rotate keys in the Fernet key repository.
+* ``fernet_setup``: Setup a Fernet key repository.
+* ``mapping_engine``: Test your federation mapping rules.
* ``mapping_purge``: Purge the identity mapping table.
* ``pki_setup``: Initialize the certificates used to sign tokens.
* ``saml_idp_metadata``: Generate identity provider metadata.
@@ -1629,23 +1697,23 @@ enable this option, you must have the following ``keystone.conf`` options set:
.. code-block:: ini
[identity]
- driver = keystone.identity.backends.ldap.Identity
+ driver = ldap
[resource]
- driver = keystone.resource.backends.sql.Resource
+ driver = sql
[assignment]
- driver = keystone.assignment.backends.sql.Assignment
+ driver = sql
[role]
- driver = keystone.assignment.role_backends.sql.Role
+ driver = sql
With the above configuration, Keystone will only lookup identity related
information such users, groups, and group membership from the directory, while
resources, roles and assignment related information will be provided by the SQL
backend. Also note that if there is an LDAP Identity, and no resource,
assignment or role backend is specified, they will default to LDAP. Although
-this may seem counterintuitive, it is provided for backwards compatibility.
+this may seem counter intuitive, it is provided for backwards compatibility.
Nonetheless, the explicit option will always override the implicit option, so
specifying the options as shown above will always be correct. Finally, it is
also worth noting that whether or not the LDAP accessible directory is to be
@@ -1662,7 +1730,7 @@ section:
.. NOTE::
- While having identity related infomration backed by LDAP while other
+ While having identity related information backed by LDAP while other
information is backed by SQL is a supported configuration, as shown above;
the opposite is not true. If either resource or assignment drivers are
configured for LDAP, then Identity must also be configured for LDAP.
@@ -1671,32 +1739,31 @@ Connection Pooling
------------------
Various LDAP backends in Keystone use a common LDAP module to interact with
-LDAP data. By default, a new connection is established for LDAP operations.
-This can become highly expensive when TLS support is enabled which is a likely
-configuraton in enterprise setup. Re-using of connectors from a connection pool
-drastically reduces overhead of initiating a new connection for every LDAP
+LDAP data. By default, a new connection is established for each LDAP operation.
+This can become highly expensive when TLS support is enabled, which is a likely
+configuration in an enterprise setup. Reuse of connectors from a connection
+pool drastically reduces overhead of initiating a new connection for every LDAP
operation.
-Keystone now provides connection pool support via configuration. This change
-will keep LDAP connectors alive and re-use for subsequent LDAP operations. A
-connection lifespan is going to be configurable with other pooling specific
-attributes. The change is made in LDAP handler layer logic which is primarily
-responsible for LDAP connection and shared common operations.
-
-In LDAP identity driver, Keystone authenticates end user by LDAP bind with user
-DN and provided password. These kind of auth binds can fill up the pool pretty
-quickly so a separate pool is provided for those end user auth bind calls. If a
-deployment does not want to use pool for those binds, then it can disable
-pooling selectively by ``use_auth_pool`` as false. If a deployment wants to use
-pool for those auth binds, then ``use_auth_pool`` needs to be true. For auth
-pool, a different pool size (``auth_pool_size``) and connection lifetime
-(``auth_pool_connection_lifetime``) can be specified. With enabled auth pool,
-its connection lifetime should be kept short so that pool frequently re-binds
-the connection with provided creds and works reliably in end user password
-change case. When ``use_pool`` is false (disabled), then auth pool
-configuration is also not used.
-
-Connection pool configuration is added in ``[ldap]`` configuration section:
+Keystone provides connection pool support via configuration. This will keep
+LDAP connectors alive and reused for subsequent LDAP operations. The connection
+lifespan is configurable, as are other pooling-specific attributes.
+
+In the LDAP identity driver, Keystone authenticates end users via an LDAP bind
+with the user's DN and provided password. This kind of authentication bind
+can fill up the pool pretty quickly, so a separate pool is provided for end
+user authentication bind calls. If a deployment does not want to use a pool for
+those binds, then it can disable pooling selectively by setting
+``use_auth_pool`` to false. If a deployment wants to use a pool for those
+authentication binds, then ``use_auth_pool`` needs to be set to true. For the
+authentication pool, a different pool size (``auth_pool_size``) and connection
+lifetime (``auth_pool_connection_lifetime``) can be specified. With an enabled
+authentication pool, its connection lifetime should be kept short so that the
+pool frequently re-binds the connection with the provided credentials and works
+reliably in the end user password change case. When ``use_pool`` is false
+(disabled), then the authentication pool configuration is also not used.
+
+Connection pool configuration is part of the ``[ldap]`` configuration section:
.. code-block:: ini
diff --git a/keystone-moon/doc/source/configure_federation.rst b/keystone-moon/doc/source/configure_federation.rst
index 2da5f822..09d0984d 100644
--- a/keystone-moon/doc/source/configure_federation.rst
+++ b/keystone-moon/doc/source/configure_federation.rst
@@ -34,54 +34,72 @@ Keystone as a Service Provider (SP)
Prerequisites
-------------
-This approach to federation supports Keystone as a Service Provider, consuming
+This approach to federation supports keystone as a Service Provider, consuming
identity properties issued by an external Identity Provider, such as SAML
assertions or OpenID Connect claims.
-Federated users are not mirrored in the Keystone identity backend
+Federated users are not mirrored in the keystone identity backend
(for example, using the SQL driver). The external Identity Provider is
responsible for authenticating users, and communicates the result of
-authentication to Keystone using identity properties. Keystone maps these
-values to Keystone user groups and assignments created in Keystone.
+authentication to keystone using identity properties. Keystone maps these
+values to keystone user groups and assignments created in keystone.
The following configuration steps were performed on a machine running
Ubuntu 12.04 and Apache 2.2.22.
To enable federation, you'll need to:
-1. Run Keystone under Apache, rather than using ``keystone-all``.
+1. Run keystone under Apache, rather than using ``keystone-all``.
2. Configure Apache to use a federation capable authentication method.
-3. Enable ``OS-FEDERATION`` extension.
+3. Configure ``federation`` in keystone.
Configure Apache to use a federation capable authentication method
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-There are many ways to configure Federation in the Apache HTTPD server.
-Using Shibboleth and OpenID Connect are documented so far.
+There is currently support for two major federation protocols:
-* To use Shibboleth, follow the steps outlined at: `Setup Shibboleth`_.
-* To use OpenID Connect, follow the steps outlined at: `Setup OpenID Connect`_.
+* SAML - Keystone supports the following implementations:
-.. _`Setup Shibboleth`: extensions/shibboleth.html
-.. _`Setup OpenID Connect`: extensions/openidc.html
+ * Shibboleth - see `Setup Shibboleth`_.
+ * Mellon - see `Setup Mellon`_.
-Enable the ``OS-FEDERATION`` extension
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+* OpenID Connect - see `Setup OpenID Connect`_.
-Follow the steps outlined at: `Enabling Federation Extension`_.
+.. _`Setup Shibboleth`: federation/shibboleth.html
+.. _`Setup OpenID Connect`: federation/openidc.html
+.. _`Setup Mellon`: federation/mellon.html
-.. _`Enabling Federation Extension`: extensions/federation.html
+Configure keystone and Horizon for Single Sign-On
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Configuring Federation
-----------------------
+* To configure horizon to access a federated keystone,
+ follow the steps outlined at: `Keystone Federation and Horizon`_.
-Now that the Identity Provider and Keystone are communicating we can start to
-configure the ``OS-FEDERATION`` extension.
+.. _`Keystone Federation and Horizon`: federation/websso.html
-1. Add local Keystone groups and roles
-2. Add Identity Provider(s), Mapping(s), and Protocol(s)
+Configuring Federation in Keystone
+-----------------------------------
+
+Now that the Identity Provider and keystone are communicating we can start to
+configure ``federation``.
+
+1. Configure authentication drivers in ``keystone.conf``
+2. Add local keystone groups and roles
+3. Add Identity Provider(s), Mapping(s), and Protocol(s)
+
+Configure authentication drivers in ``keystone.conf``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Add the authentication methods to the ``[auth]`` section in ``keystone.conf``.
+Names should be equal to protocol names added via Identity API v3. Here we use
+examples ``saml2`` and ``openid``.
-Create Keystone groups and assign roles
+.. code-block:: ini
+
+ [auth]
+ methods = external,password,token,saml2,openid
+
+Create keystone groups and assign roles
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As mentioned earlier, no new users will be added to the Identity backend, but
@@ -109,14 +127,14 @@ To utilize federation the following must be created in the Identity Service:
* Mapping
* Protocol
-More information on ``OS-FEDERATION`` can be found `here
+More information on ``federation in keystone`` can be found `here
<http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-federation-ext.html>`__.
~~~~~~~~~~~~~~~~~
Identity Provider
~~~~~~~~~~~~~~~~~
-Create an Identity Provider object in Keystone, which represents the Identity
+Create an Identity Provider object in keystone, which represents the Identity
Provider we will use to authenticate end users.
More information on identity providers can be found `here
@@ -129,8 +147,10 @@ A mapping is a list of rules. The only Identity API objects that will support ma
and users.
Mapping adds a set of rules to map federation protocol attributes to Identity API objects.
-An Identity Provider has exactly one mapping specified per protocol.
+There are many different ways to set up as well as combine these rules. More information on
+rules can be found on the :doc:`mapping_combinations` page.
+An Identity Provider has exactly one mapping specified per protocol.
Mapping objects can be used multiple times by different combinations of Identity Provider and Protocol.
More information on mapping can be found `here
@@ -150,7 +170,7 @@ you want to use with the combination of the IdP and Protocol.
Performing federated authentication
-----------------------------------
-1. Authenticate externally and generate an unscoped token in Keystone
+1. Authenticate externally and generate an unscoped token in keystone
2. Determine accessible resources
3. Get a scoped token
@@ -243,11 +263,22 @@ Keystone as an Identity Provider (IdP)
that will not be backported). These issues have been fixed and this feature
is considered stable and supported as of the Kilo release.
+.. NOTE::
+
+ This feature requires installation of the xmlsec1 tool via your
+ distribution packaging system (for instance apt or yum).
+
+ Example for apt:
+
+ .. code-block:: bash
+
+ $ apt-get install xmlsec1
+
Configuration Options
---------------------
There are certain settings in ``keystone.conf`` that must be setup, prior to
-attempting to federate multiple Keystone deployments.
+attempting to federate multiple keystone deployments.
Within ``keystone.conf``, assign values to the ``[saml]`` related fields, for
example:
@@ -270,7 +301,7 @@ also be setup. It is recommended that these values be URL safe.
idp_organization_display_name=Example Corp.
idp_organization_url=example.com
-As with the Organizaion options, the Contact options, are not necessary, but
+As with the Organization options, the Contact options, are not necessary, but
it's advisable to set these values too.
.. code-block:: ini
@@ -286,7 +317,7 @@ Generate Metadata
-----------------
In order to create a trust between the IdP and SP, metadata must be exchanged.
-To create metadata for your Keystone IdP, run the ``keystone-manage`` command
+To create metadata for your keystone IdP, run the ``keystone-manage`` command
and pipe the output to a file. For example:
.. code-block:: bash
@@ -304,33 +335,44 @@ In this example we are creating a new Service Provider with an ID of ``BETA``,
a ``sp_url`` of ``http://beta.example.com/Shibboleth.sso/POST/ECP`` and a
``auth_url`` of ``http://beta.example.com:5000/v3/OS-FEDERATION/identity_providers/beta/protocols/saml2/auth``
. The ``sp_url`` will be used when creating a SAML assertion for ``BETA`` and
-signed by the current Keystone IdP. The ``auth_url`` is used to retrieve the
-token for ``BETA`` once the SAML assertion is sent.
+signed by the current keystone IdP. The ``auth_url`` is used to retrieve the
+token for ``BETA`` once the SAML assertion is sent. Although the ``enabled``
+field is optional, we are setting it to ``true``; otherwise it will default to
+``false``.
.. code-block:: bash
$ curl -s -X PUT \
-H "X-Auth-Token: $OS_TOKEN" \
-H "Content-Type: application/json" \
- -d '{"service_provider": {"auth_url": "http://beta.example.com:5000/v3/OS-FEDERATION/identity_providers/beta/protocols/saml2/auth", "sp_url": "https://example.com:5000/Shibboleth.sso/SAML2/ECP"}' \
+ -d '{"service_provider": {"auth_url": "http://beta.example.com:5000/v3/OS-FEDERATION/identity_providers/beta/protocols/saml2/auth", "sp_url": "https://example.com:5000/Shibboleth.sso/SAML2/ECP", "enabled": true}}' \
http://localhost:5000/v3/service_providers/BETA | python -mjson.tool
Testing it all out
------------------
-Lastly, if a scoped token and a Service Provider region are presented to
-Keystone, the result will be a full SAML Assertion, signed by the IdP
-Keystone, specifically intended for the Service Provider Keystone.
+Lastly, if a scoped token and a Service Provider scope are presented to the
+local keystone, the result will be a full ECP wrapped SAML Assertion,
+specifically intended for the Service Provider keystone.
+
+.. NOTE::
+ ECP stands for Enhanced Client or Proxy, an extension from the SAML2
+ protocol used in non-browser interfaces, like in the following example
+ with cURL.
.. code-block:: bash
$ curl -s -X POST \
-H "Content-Type: application/json" \
-d '{"auth": {"scope": {"service_provider": {"id": "BETA"}}, "identity": {"token": {"id": "d793d935b9c343f783955cf39ee7dc3c"}, "methods": ["token"]}}}' \
- http://localhost:5000/v3/auth/OS-FEDERATION/saml2
+ http://localhost:5000/v3/auth/OS-FEDERATION/saml2/ecp
+
+.. NOTE::
+ Use the URL http://localhost:5000/v3/auth/OS-FEDERATION/saml2 to request
+ pure SAML Assertions.
-At this point the SAML Assertion can be sent to the Service Provider Keystone
-using the provided ``auth_url`` in the ``X-Auth-Url`` header present in the
-response containing the SAML Assertion, and a valid OpenStack token, issued by
-a Service Provider Keystone, will be returned.
+At this point the ECP wrapped SAML Assertion can be sent to the Service
+Provider keystone using the provided ``auth_url`` in the ``X-Auth-Url`` header
+present in the response containing the Assertion, and a valid OpenStack
+token, issued by a Service Provider keystone, will be returned.
diff --git a/keystone-moon/doc/source/developing.rst b/keystone-moon/doc/source/developing.rst
index 33b2dd58..50fed9e5 100644
--- a/keystone-moon/doc/source/developing.rst
+++ b/keystone-moon/doc/source/developing.rst
@@ -48,7 +48,7 @@ To run the Keystone Admin and API server instances, use:
.. code-block:: bash
- $ tools/with_venv.sh bin/keystone-all
+ $ tools/with_venv.sh keystone-all
This runs Keystone with the configuration the etc/ directory of the project.
See :doc:`configuration` for details on how Keystone is configured. By default,
@@ -74,7 +74,8 @@ place:
$ bin/keystone-manage db_sync
-.. _`python-keystoneclient`: https://github.com/openstack/python-keystoneclient
+.. _`python-keystoneclient`: https://git.openstack.org/cgit/openstack/python-keystoneclient
+.. _`openstackclient`: https://git.openstack.org/cgit/openstack/python-openstackclient
If the above commands result in a ``KeyError``, or they fail on a
``.pyc`` file with the message, ``You can only have one Python script per
@@ -91,9 +92,13 @@ following from the Keystone root project directory:
Database Schema Migrations
--------------------------
-Keystone uses SQLAlchemy-migrate_ to migrate
-the SQL database between revisions. For core components, the migrations are
-kept in a central repository under ``keystone/common/sql/migrate_repo``.
+Keystone uses SQLAlchemy-migrate_ to migrate the SQL database between
+revisions. For core components, the migrations are kept in a central
+repository under ``keystone/common/sql/migrate_repo/versions``. Each
+SQL migration has a version which can be identified by the name of
+the script; the version is the number before the underscore.
+For example, if the script is named ``001_add_X_table.py`` then the
+version of the SQL migration is ``1``.
.. _SQLAlchemy-migrate: http://code.google.com/p/sqlalchemy-migrate/
@@ -103,11 +108,13 @@ but should instead have its own repository. This repository must be in the
extension's directory in ``keystone/contrib/<extension>/migrate_repo``. In
addition, it needs a subdirectory named ``versions``. For example, if the
extension name is ``my_extension`` then the directory structure would be
-``keystone/contrib/my_extension/migrate_repo/versions/``. For the migration to
-work, both the ``migrate_repo`` and ``versions`` subdirectories must have
-``__init__.py`` files. SQLAlchemy-migrate will look for a configuration file in
-the ``migrate_repo`` named ``migrate.cfg``. This conforms to a key/value `ini`
-file format. A sample configuration file with the minimal set of values is::
+``keystone/contrib/my_extension/migrate_repo/versions/``.
+
+For the migration to work, both the ``migrate_repo`` and ``versions``
+subdirectories must have ``__init__.py`` files. SQLAlchemy-migrate will look
+for a configuration file in the ``migrate_repo`` named ``migrate.cfg``. This
+conforms to a key/value `ini` file format. A sample configuration file with
+the minimal set of values is::
[db_settings]
repository_id=my_extension
@@ -117,13 +124,33 @@ file format. A sample configuration file with the minimal set of values is::
The directory ``keystone/contrib/example`` contains a sample extension
migration.
-Migrations must be explicitly run for each extension individually. To run a
-migration for a specific extension, simply run:
+For core components, to run a migration for upgrade, simply run:
+
+.. code-block:: bash
+
+ $ keystone-manage db_sync <version>
+
+.. NOTE::
+
+ If no version is specified, then the most recent migration will be used.
+
+For extensions, migrations must be explicitly run for each extension individually.
+To run a migration for a specific extension, simply run:
.. code-block:: bash
$ keystone-manage db_sync --extension <name>
+.. NOTE::
+
+ The meaning of "extension" here has been changed since all of the
+ "extensions" are loaded and the migrations are run by default, but
+ the source is maintained in a separate directory.
+
+.. NOTE::
+
+ Schema downgrades are not supported for either core components or extensions.
+
Initial Sample Data
-------------------
@@ -132,18 +159,24 @@ data for use with keystone:
.. code-block:: bash
- $ OS_SERVICE_TOKEN=ADMIN tools/with_venv.sh tools/sample_data.sh
+ $ OS_TOKEN=ADMIN tools/with_venv.sh tools/sample_data.sh
Notice it requires a service token read from an environment variable for
authentication. The default value "ADMIN" is from the ``admin_token``
option in the ``[DEFAULT]`` section in ``etc/keystone.conf``.
Once run, you can see the sample data that has been created by using the
-`python-keystoneclient`_ command-line interface:
+`openstackclient`_ command-line interface:
+
+.. code-block:: bash
+
+ $ tools/with_venv.sh openstack --os-token ADMIN --os-url http://127.0.0.1:35357/v2.0/ user list
+
+The `openstackclient`_ can be installed using the following:
.. code-block:: bash
- $ tools/with_venv.sh keystone --os-token ADMIN --os-endpoint http://127.0.0.1:35357/v2.0/ user-list
+ $ tools/with_venv.sh pip install python-openstackclient
Filtering responsibilities between controllers and drivers
----------------------------------------------------------
@@ -264,7 +297,7 @@ you'll normally only want to run the test that hits your breakpoint:
.. code-block:: bash
- $ tox -e debug keystone.tests.test_auth.AuthWithToken.test_belongs_to
+ $ tox -e debug keystone.tests.unit.test_auth.AuthWithToken.test_belongs_to
For reference, the ``debug`` tox environment implements the instructions
here: https://wiki.openstack.org/wiki/Testr#Debugging_.28pdb.29_Tests
@@ -291,31 +324,29 @@ For example, to discard logging data during a test run:
Test Structure
==============
-Not all of the tests in the tests directory are strictly unit tests. Keystone
-intentionally includes tests that run the service locally and drives the entire
-configuration to achieve basic functional testing.
+Not all of the tests in the keystone/tests/unit directory are strictly unit
+tests. Keystone intentionally includes tests that run the service locally and
+drives the entire configuration to achieve basic functional testing.
-For the functional tests, an in-memory key-value store is used to keep the
-tests fast.
+For the functional tests, an in-memory key-value store or in-memory sqlite
+database is used to keep the tests fast.
-Within the tests directory, the general structure of the tests is a basic
-set of tests represented under a test class, and then subclasses of those
+Within the tests directory, the general structure of the backend tests is a
+basic set of tests represented under a test class, and then subclasses of those
tests under other classes with different configurations to drive different
backends through the APIs.
For example, ``test_backend.py`` has a sequence of tests under the class
-``IdentityTests`` that will work with the default drivers as configured in
-this projects etc/ directory. ``test_backend_sql.py`` subclasses those tests,
-changing the configuration by overriding with configuration files stored in
-the tests directory aimed at enabling the SQL backend for the Identity module.
-
-Likewise, ``test_v2_keystoneclient.py`` takes advantage of the tests written
-against ``KeystoneClientTests`` to verify the same tests function through
-different drivers and releases of the Keystone client.
+:class:`~keystone.tests.unit.test_backend.IdentityTests` that will work with
+the default drivers as configured in this project's etc/ directory.
+``test_backend_sql.py`` subclasses those tests, changing the configuration by
+overriding with configuration files stored in the ``tests/unit/config_files``
+directory aimed at enabling the SQL backend for the Identity module.
-The class ``CompatTestCase`` does the work of checking out a specific version
-of python-keystoneclient, and then verifying it against a temporarily running
-local instance to explicitly verify basic functional testing across the API.
+:class:`keystone.tests.unit.test_v2_keystoneclient.ClientDrivenTestCase`
+uses the installed python-keystoneclient, verifying it against a temporarily
+running local keystone instance to explicitly verify basic functional testing
+across the API.
Testing Schema Migrations
=========================
@@ -325,7 +356,8 @@ built-in test runner, one migration at a time.
.. WARNING::
- This may leave your database in an inconsistent state; attempt this in non-production environments only!
+ This may leave your database in an inconsistent state; attempt this in
+ non-production environments only!
This is useful for testing the *next* migration in sequence (both forward &
backward) in a database under version control:
@@ -344,9 +376,17 @@ of your data during migration.
Writing Tests
=============
-To add tests covering all drivers, update the relevant base test class
-(``test_backend.py``, ``test_legacy_compat.py``, and
-``test_keystoneclient.py``).
+To add tests covering all drivers, update the base test class in
+``test_backend.py``.
+
+.. NOTE::
+
+ The structure of backend testing is in transition, migrating from having
+ all classes in a single file (test_backend.py) to one where there is a
+ directory structure to reduce the size of the test files. See:
+
+ - :mod:`keystone.tests.unit.backend.role`
+ - :mod:`keystone.tests.unit.backend.domain_config`
To add new drivers, subclass the ``test_backend.py`` (look towards
``test_backend_sql.py`` or ``test_backend_kvs.py`` for examples) and update the
@@ -363,9 +403,9 @@ You may also be interested in either the
`OpenStack Continuous Integration Infrastructure`_ or the
`OpenStack Integration Testing Project`_.
-.. _devstack: http://devstack.org/
-.. _OpenStack Continuous Integration Infrastructure: http://ci.openstack.org
-.. _OpenStack Integration Testing Project: https://github.com/openstack/tempest
+.. _devstack: http://docs.openstack.org/developer/devstack/
+.. _OpenStack Continuous Integration Infrastructure: http://docs.openstack.org/infra/system-config
+.. _OpenStack Integration Testing Project: https://git.openstack.org/cgit/openstack/tempest
LDAP Tests
@@ -379,15 +419,16 @@ and set environment variables ``KEYSTONE_IDENTITY_BACKEND=ldap`` and
``KEYSTONE_CLEAR_LDAP=yes`` in your ``localrc`` file.
The unit tests can be run against a live server with
-``keystone/tests/test_ldap_livetest.py`` and
-``keystone/tests/test_ldap_pool_livetest.py``. The default password is ``test``
-but if you have installed devstack with a different LDAP password, modify the
-file ``keystone/tests/config_files/backend_liveldap.conf`` and
-``keystone/tests/config_files/backend_pool_liveldap.conf`` to reflect your password.
+``keystone/tests/unit/test_ldap_livetest.py`` and
+``keystone/tests/unit/test_ldap_pool_livetest.py``. The default password is
+``test`` but if you have installed devstack with a different LDAP password,
+modify the file ``keystone/tests/unit/config_files/backend_liveldap.conf`` and
+``keystone/tests/unit/config_files/backend_pool_liveldap.conf`` to reflect your
+password.
.. NOTE::
- To run the live tests you need to set the environment variable ``ENABLE_LDAP_LIVE_TEST``
- to a non-negative value.
+ To run the live tests you need to set the environment variable
+ ``ENABLE_LDAP_LIVE_TEST`` to a non-negative value.
"Work in progress" Tests
@@ -405,21 +446,22 @@ including:
used to catch bug regressions and commit it before any code is
written.
-The ``keystone.tests.util.wip`` decorator can be used to mark a test as
-WIP. A WIP test will always be run. If the test fails then a TestSkipped
+The :func:`keystone.tests.unit.utils.wip` decorator can be used to mark a test
+as WIP. A WIP test will always be run. If the test fails then a TestSkipped
exception is raised because we expect the test to fail. We do not pass
the test in this case so that it doesn't count toward the number of
successfully run tests. If the test passes an AssertionError exception is
raised so that the developer knows they made the test pass. This is a
reminder to remove the decorator.
-The ``wip`` decorator requires that the author provides a message. This
-message is important because it will tell other developers why this test
-is marked as a work in progress. Reviewers will require that these
-messages are descriptive and accurate.
+The :func:`~keystone.tests.unit.utils.wip` decorator requires that the author
+provides a message. This message is important because it will tell other
+developers why this test is marked as a work in progress. Reviewers will
+require that these messages are descriptive and accurate.
.. NOTE::
- The ``wip`` decorator is not a replacement for skipping tests.
+ The :func:`~keystone.tests.unit.utils.wip` decorator is not a replacement for
+ skipping tests.
.. code-block:: python
@@ -427,6 +469,10 @@ messages are descriptive and accurate.
def test():
pass
+.. NOTE::
+ Another strategy is to not use the wip decorator and instead show how the
+ code currently works incorrectly. Which strategy is chosen is up to the
+ developer.
Generating Updated Sample Config File
-------------------------------------
@@ -435,19 +481,23 @@ Keystone's sample configuration file ``etc/keystone.conf.sample`` is automatical
generated based upon all of the options available within Keystone. These options
are sourced from the many files around Keystone as well as some external libraries.
-If new options are added, primarily located in ``keystone.common.config``, a new
-sample configuration file needs to be generated. Generating a new sample configuration
-to be included in a commit run:
+The sample configuration file is now kept up to date by an infra job that
+generates the config file and if there are any changes will propose a review
+as the OpenStack Proposal Bot. Developers should *NOT* generate the config file
+and propose it as part of their patches since the proposal bot will do this for
+you.
+
+To generate a new sample configuration to see what it looks like, run:
.. code-block:: bash
- $ tox -esample_config -r
+ $ tox -egenconfig -r
The tox command will place an updated sample config in ``etc/keystone.conf.sample``.
If there is a new external library (e.g. ``oslo.messaging``) that utilizes the
``oslo.config`` package for configuration, it can be added to the list of libraries
-found in ``tools/config/oslo.config.generator.rc``.
+found in ``config-generator/keystone.conf``.
Translated responses
@@ -768,4 +818,4 @@ The documentation is generated with Sphinx using the tox command. To create HTM
$ tox -e docs
-The results are in the docs/build/html and docs/build/man directories respectively.
+The results are in the doc/build/html and doc/build/man directories respectively.
diff --git a/keystone-moon/doc/source/extension_development.rst b/keystone-moon/doc/source/extension_development.rst
index a0248495..0805af40 100644
--- a/keystone-moon/doc/source/extension_development.rst
+++ b/keystone-moon/doc/source/extension_development.rst
@@ -70,7 +70,7 @@ must follow the config file conventions and introduce a dedicated section.
Example::
[example]
- driver = keystone.contrib.example.backends.sql.mySQLClass
+ driver = sql
[my_other_extension]
extension_flag = False
@@ -81,7 +81,7 @@ extensions are disabled.
Example::
[example]
- #driver = keystone.contrib.example.backends.sql.mySQLClass
+ #driver = sql
[my_other_extension]
#extension_flag = False
diff --git a/keystone-moon/doc/source/extensions.rst b/keystone-moon/doc/source/extensions.rst
index f3bade9b..0a6b34a2 100644
--- a/keystone-moon/doc/source/extensions.rst
+++ b/keystone-moon/doc/source/extensions.rst
@@ -86,26 +86,6 @@ a policy ID.
* `API Specification for Endpoint Policy <http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-endpoint-policy.html>`__
-----------
-Federation
-----------
-
-The Federation extension provides the ability for users to manage Identity
-Providers (IdPs) and establish a set of rules to map federation protocol
-attributes to Identity API attributes.
-
-.. NOTE:: Support status for Federation
-
- *Experimental* (Icehouse, Juno)
- *Stable* (Kilo)
-
-.. toctree::
- :maxdepth: 1
-
- extensions/federation.rst
-
-* `API Specification for Federation <http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-federation-ext.html>`__
-
-------
Inherit
-------
diff --git a/keystone-moon/doc/source/extensions/endpoint_filter.rst b/keystone-moon/doc/source/extensions/endpoint_filter.rst
index 66198503..4ab194b8 100644
--- a/keystone-moon/doc/source/extensions/endpoint_filter.rst
+++ b/keystone-moon/doc/source/extensions/endpoint_filter.rst
@@ -24,7 +24,7 @@ To enable the endpoint filter extension:
in ``keystone.conf``. For example::
[catalog]
- driver = keystone.contrib.endpoint_filter.backends.catalog_sql.EndpointFilterCatalog
+ driver = catalog_sql
2. Add the ``endpoint_filter_extension`` filter to the ``api_v3`` pipeline in
``keystone-paste.ini``. This must be added after ``json_body`` and before
diff --git a/keystone-moon/doc/source/extensions/endpoint_policy.rst b/keystone-moon/doc/source/extensions/endpoint_policy.rst
index 86ff2264..ad403d50 100644
--- a/keystone-moon/doc/source/extensions/endpoint_policy.rst
+++ b/keystone-moon/doc/source/extensions/endpoint_policy.rst
@@ -21,7 +21,7 @@ To enable the endpoint policy extension:
``[endpoint_policy]`` section in ``keystone.conf``. For example::
[endpoint_policy]
- driver = keystone.contrib.endpoint_policy.backends.sql.EndpointPolicy
+ driver = sql
2. Add the ``endpoint_policy_extension`` policy to the ``api_v3`` pipeline in
``keystone-paste.ini``. This must be added after ``json_body`` and before
diff --git a/keystone-moon/doc/source/extensions/oauth1.rst b/keystone-moon/doc/source/extensions/oauth1.rst
index c89ee126..29955d74 100644
--- a/keystone-moon/doc/source/extensions/oauth1.rst
+++ b/keystone-moon/doc/source/extensions/oauth1.rst
@@ -23,13 +23,12 @@ To enable the OAuth1 extension:
1. Optionally, add the oauth1 extension driver to the ``[oauth1]`` section in ``keystone.conf``. For example::
[oauth1]
- driver = keystone.contrib.oauth1.backends.sql.OAuth1
+ driver = sql
2. Add the ``oauth1`` authentication method to the ``[auth]`` section in ``keystone.conf``::
[auth]
methods = external,password,token,oauth1
- oauth1 = keystone.auth.plugins.oauth1.OAuth
3. Add the ``oauth1_extension`` filter to the ``api_v3`` pipeline in
``keystone-paste.ini``. This must be added after ``json_body`` and before
diff --git a/keystone-moon/doc/source/extensions/revoke.rst b/keystone-moon/doc/source/extensions/revoke.rst
index e8a25ce9..a89e359d 100644
--- a/keystone-moon/doc/source/extensions/revoke.rst
+++ b/keystone-moon/doc/source/extensions/revoke.rst
@@ -27,7 +27,7 @@ Enabling the Revocation Extension
in ``keystone.conf``. For example::
[revoke]
- driver = keystone.contrib.revoke.backends.sql.Revoke
+ driver = sql
2. Add the required ``filter`` to the ``pipeline`` in ``keystone-paste.ini``.
This must be added after ``json_body`` and before the last entry in the
diff --git a/keystone-moon/doc/source/external-auth.rst b/keystone-moon/doc/source/external-auth.rst
index 5f3c9af8..4b545e4f 100644
--- a/keystone-moon/doc/source/external-auth.rst
+++ b/keystone-moon/doc/source/external-auth.rst
@@ -28,15 +28,14 @@ To configure the plugin that should be used set the ``external`` option again
in the ``auth`` section. There are two external authentication method plugins
provided by Keystone:
-* ``keystone.auth.plugins.external.Default``: This plugin won't take into
- account the domain information that the external authentication method may
- pass down to Keystone and will always use the configured default domain. The
- ``REMOTE_USER`` variable is the username.
-
-* ``keystone.auth.plugins.external.Domain``: This plugin expects that the
- ``REMOTE_DOMAIN`` variable contains the domain for the user. If this variable
- is not present, the configured default domain will be used. The
- ``REMOTE_USER`` variable is the username.
+* ``DefaultDomain``: This plugin won't take into account the domain information
+ that the external authentication method may pass down to Keystone and will
+ always use the configured default domain. The ``REMOTE_USER`` variable is the
+ username. This is the default if no plugin is given.
+
+* ``Domain``: This plugin expects that the ``REMOTE_DOMAIN`` variable contains
+ the domain for the user. If this variable is not present, the configured
+ default domain will be used. The ``REMOTE_USER`` variable is the username.
Using HTTPD authentication
==========================
diff --git a/keystone-moon/doc/source/federation/mellon.rst b/keystone-moon/doc/source/federation/mellon.rst
new file mode 100644
index 00000000..9c4675b7
--- /dev/null
+++ b/keystone-moon/doc/source/federation/mellon.rst
@@ -0,0 +1,122 @@
+:orphan:
+
+..
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+==============================
+Setup Mellon (mod_auth_mellon)
+==============================
+
+Configure Apache HTTPD for mod_auth_mellon
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Follow the steps outlined at: `Running Keystone in HTTPD`_.
+
+.. _`Running Keystone in HTTPD`: ../apache-httpd.html
+
+You'll also need to install the Apache module `mod_auth_mellon
+<https://github.com/UNINETT/mod_auth_mellon>`_. For example:
+
+.. code-block:: bash
+
+ $ apt-get install libapache2-mod-auth-mellon
+
+Configure your Keystone virtual host and adjust the config to properly handle SAML2 workflow:
+
+Add *WSGIScriptAlias* directive to your vhost configuration::
+
+ WSGIScriptAliasMatch ^(/v3/OS-FEDERATION/identity_providers/.*?/protocols/.*?/auth)$ /var/www/keystone/main/$1
+
+Make sure the *wsgi-keystone.conf* contains a *<Location>* directive for the Mellon module and
+a *<Location>* directive for each identity provider::
+
+ <Location /v3>
+ MellonEnable "info"
+ MellonSPPrivateKeyFile /etc/httpd/mellon/http_keystone.fqdn.key
+ MellonSPCertFile /etc/httpd/mellon/http_keystone.fqdn.cert
+ MellonSPMetadataFile /etc/httpd/mellon/http_keystone.fqdn.xml
+ MellonIdPMetadataFile /etc/httpd/mellon/idp-metadata.xml
+ MellonEndpointPath /v3/OS-FEDERATION/identity_providers/idp_1/protocols/saml2/auth/mellon
+ MellonIdP "IDP"
+ </Location>
+
+ <Location /v3/OS-FEDERATION/identity_providers/idp_1/protocols/saml2/auth>
+ AuthType "Mellon"
+ MellonEnable "auth"
+ </Location>
+
+.. NOTE::
+ * See below for information about how to generate the values for the
+ `MellonSPMetadataFile`, etc. directives.
+ * ``saml2`` may be different in your deployment, but do not use a wildcard value.
+ Otherwise *every* federated protocol will be handled by Mellon.
+ * ``idp_1`` has to be replaced with the name associated with the IdP in Keystone.
+ * You are advised to carefully examine `mod_auth_mellon Apache
+ configuration documentation
+ <https://github.com/UNINETT/mod_auth_mellon>`_
+
+Enable the Keystone virtual host, for example:
+
+.. code-block:: bash
+
+ $ a2ensite wsgi-keystone.conf
+
+Enable the ``ssl`` and ``auth_mellon`` modules, for example:
+
+.. code-block:: bash
+
+ $ a2enmod ssl
+ $ a2enmod auth_mellon
+
+Restart the Apache instance that is serving Keystone, for example:
+
+.. code-block:: bash
+
+ $ service apache2 restart
+
+Configuring the Mellon SP Metadata
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Mellon provides a script called ``mellon_create_metadata.sh`` which generates the
+values for the config directives `MellonSPPrivateKeyFile`, `MellonSPCertFile`,
+and `MellonSPMetadataFile`. It is run like this:
+
+.. code-block:: bash
+
+ $ mellon_create_metadata.sh http://keystone.fqdn:5000 \
+ http://keystone.fqdn:5000/v3/OS-FEDERATION/identity_providers/idp_1/protocols/saml2/auth/mellon
+
+The first parameter is used as the entity ID, a unique identifier for this
+Keystone SP. You do not have to use the URL, but it is an easy way to uniquely
+identify each Keystone SP. The second parameter is the full URL for the
+endpoint path corresponding to the parameter `MellonEndpointPath`.
+
+Fetch your Identity Provider's Metadata file. This corresponds to the value of
+the `MellonIdPMetadataFile` directive above. For example:
+
+.. code-block:: bash
+
+ $ wget --cacert /path/to/ca.crt -O /etc/httpd/mellon/idp-metadata.xml \
+ https://idp.fqdn/idp/saml2/metadata
+
+Upload your Service Provider's Metadata file to your Identity Provider. This
+is the file used as the value of the `MellonSPMetadataFile` in the config,
+generated by the `mellon_create_metadata.sh` script. The IdP may provide a
+webpage where you can upload the file, or you may be required to submit the
+file using `wget` or `curl`. Please check your IdP documentation for details.
+
+Once you are done, restart the Apache instance that is serving Keystone, for example:
+
+.. code-block:: bash
+
+ $ service apache2 restart
diff --git a/keystone-moon/doc/source/federation/openidc.rst b/keystone-moon/doc/source/federation/openidc.rst
new file mode 100644
index 00000000..ece82d3a
--- /dev/null
+++ b/keystone-moon/doc/source/federation/openidc.rst
@@ -0,0 +1,94 @@
+:orphan:
+
+..
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+====================
+Setup OpenID Connect
+====================
+
+Configuring mod_auth_openidc
+============================
+
+Federate Keystone (SP) and an external IdP using OpenID Connect (`mod_auth_openidc`_)
+
+.. _`mod_auth_openidc`: https://github.com/pingidentity/mod_auth_openidc
+
+To install `mod_auth_openidc` on Ubuntu, perform the following:
+
+.. code-block:: bash
+
+ sudo apt-get install libapache2-mod-auth-openidc
+
+This module is available for other distributions (Fedora/CentOS/Red Hat) from:
+https://github.com/pingidentity/mod_auth_openidc/releases
+
+In the keystone Apache site file, add the following as a top level option, to
+load the `mod_auth_openidc` module:
+
+.. code-block:: xml
+
+ LoadModule auth_openidc_module /usr/lib/apache2/modules/mod_auth_openidc.so
+
+Also within the same file, locate the virtual host entry and add the following
+entries for OpenID Connect:
+
+.. code-block:: xml
+
+ <VirtualHost *:5000>
+
+ ...
+
+ OIDCClaimPrefix "OIDC-"
+ OIDCResponseType "id_token"
+ OIDCScope "openid email profile"
+ OIDCProviderMetadataURL <url_of_provider_metadata>
+ OIDCClientID <openid_client_id>
+ OIDCClientSecret <openid_client_secret>
+ OIDCCryptoPassphrase openstack
+ OIDCRedirectURI http://localhost:5000/v3/OS-FEDERATION/identity_providers/<idp_id>/protocols/oidc/auth/redirect
+
+ <LocationMatch /v3/OS-FEDERATION/identity_providers/.*?/protocols/oidc/auth>
+ AuthType openid-connect
+ Require valid-user
+ LogLevel debug
+ </LocationMatch>
+ </VirtualHost>
+
+Note that an example of an `OIDCProviderMetadataURL` is: https://accounts.google.com/.well-known/openid-configuration
+If not using `OIDCProviderMetadataURL`, then the following attributes
+must be specified: `OIDCProviderIssuer`, `OIDCProviderAuthorizationEndpoint`,
+`OIDCProviderTokenEndpoint`, `OIDCProviderTokenEndpointAuth`,
+`OIDCProviderUserInfoEndpoint`, and `OIDCProviderJwksUri`.
+
+Note, if using a mod_wsgi version less than 4.3.0, then the `OIDCClaimPrefix`
+must be specified to have only alphanumerics or a dash ("-"). This is because
+mod_wsgi blocks headers that do not fit these criteria. See http://modwsgi.readthedocs.org/en/latest/release-notes/version-4.3.0.html#bugs-fixed
+for more details.
+
+Once you are done, restart your Apache daemon:
+
+.. code-block:: bash
+
+ $ service apache2 restart
+
+Tips
+====
+
+1. When creating a mapping, note that the 'remote' attributes will be prefixed
+   with `HTTP_`, so for instance, if you set OIDCClaimPrefix to `OIDC-`, then a
+   typical remote value to check for is: `HTTP_OIDC_ISS`.
+
+2. Don't forget to add oidc as an [auth] plugin in keystone.conf, see `Step 2`_
+
+.. _`Step 2`: federation/federation.html
diff --git a/keystone-moon/doc/source/federation/shibboleth.rst b/keystone-moon/doc/source/federation/shibboleth.rst
new file mode 100644
index 00000000..d67cfa1a
--- /dev/null
+++ b/keystone-moon/doc/source/federation/shibboleth.rst
@@ -0,0 +1,279 @@
+:orphan:
+
+..
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+================
+Setup Shibboleth
+================
+
+Configure Apache HTTPD for mod_shibboleth
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Follow the steps outlined at: `Running Keystone in HTTPD`_.
+
+.. _`Running Keystone in HTTPD`: ../apache-httpd.html
+
+You'll also need to install `Shibboleth <https://wiki.shibboleth.net/confluence/display/SHIB2/Home>`_, for
+example:
+
+.. code-block:: bash
+
+ $ apt-get install libapache2-mod-shib2
+
+Configure your Keystone virtual host and adjust the config to properly handle SAML2 workflow:
+
+Add *WSGIScriptAlias* directive to your vhost configuration::
+
+ WSGIScriptAliasMatch ^(/v3/OS-FEDERATION/identity_providers/.*?/protocols/.*?/auth)$ /var/www/keystone/main/$1
+
+Make sure the *wsgi-keystone.conf* contains a *<Location>* directive for the Shibboleth module and
+a *<Location>* directive for each identity provider::
+
+ <Location /Shibboleth.sso>
+ SetHandler shib
+ </Location>
+
+ <Location /v3/OS-FEDERATION/identity_providers/idp_1/protocols/saml2/auth>
+ ShibRequestSetting requireSession 1
+ ShibRequestSetting applicationId idp_1
+ AuthType shibboleth
+ ShibRequireAll On
+ ShibRequireSession On
+ ShibExportAssertion Off
+ Require valid-user
+ </Location>
+
+.. NOTE::
+ * ``saml2`` may be different in your deployment, but do not use a wildcard value.
+ Otherwise *every* federated protocol will be handled by Shibboleth.
+    * ``idp_1`` has to be replaced with the name associated with the IdP in Keystone.
+ The same name is used inside the shibboleth2.xml configuration file but they could
+ be different.
+ * The ``ShibRequireSession`` and ``ShibRequireAll`` rules are invalid in
+ Apache 2.4+ and should be dropped in that specific setup.
+ * You are advised to carefully examine `Shibboleth Apache configuration
+ documentation
+ <https://wiki.shibboleth.net/confluence/display/SHIB2/NativeSPApacheConfig>`_
+
+Enable the Keystone virtual host, for example:
+
+.. code-block:: bash
+
+ $ a2ensite wsgi-keystone.conf
+
+Enable the ``ssl`` and ``shib2`` modules, for example:
+
+.. code-block:: bash
+
+ $ a2enmod ssl
+ $ a2enmod shib2
+
+Restart Apache, for example:
+
+.. code-block:: bash
+
+ $ service apache2 restart
+
+Configuring shibboleth2.xml
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Once you have your Keystone vhost (virtual host) ready, it's then time to
+configure Shibboleth and upload your Metadata to the Identity Provider.
+
+If new certificates are required, they can be easily created by executing:
+
+.. code-block:: bash
+
+ $ shib-keygen -y <number of years>
+
+The newly created file will be stored under ``/etc/shibboleth/sp-key.pem``
+
+You should fetch your Service Provider's Metadata file. Typically this can be
+achieved by simply fetching a Metadata file, for example:
+
+.. code-block:: bash
+
+ $ wget --no-check-certificate -O <name of the file> https://service.example.org/Shibboleth.sso/Metadata
+
+Upload your Service Provider's Metadata file to your Identity Provider.
+This step depends on your Identity Provider choice and is not covered here.
+
+Configure your Service Provider by editing ``/etc/shibboleth/shibboleth2.xml``
+file. You are advised to examine `Shibboleth Service Provider Configuration documentation <https://wiki.shibboleth.net/confluence/display/SHIB2/Configuration>`_
+
+An example of your ``/etc/shibboleth/shibboleth2.xml`` may look like
+(The example shown below is for reference only, not to be used in a production
+environment):
+
+.. code-block:: xml
+
+ <!--
+ File configuration courtesy of http://testshib.org
+
+ More information:
+ https://wiki.shibboleth.net/confluence/display/SHIB2/NativeSPConfiguration
+ -->
+
+ <SPConfig xmlns="urn:mace:shibboleth:2.0:native:sp:config"
+         xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" clockSkew="1800">
+
+ <!-- The entityID is the name TestShib made for your SP. -->
+ <ApplicationDefaults entityID="https://<yourhosthere>/shibboleth">
+
+ <!--
+ You should use secure cookies if at all possible.
+ See cookieProps in this Wiki article.
+ -->
+ <!-- https://wiki.shibboleth.net/confluence/display/SHIB2/NativeSPSessions -->
+ <Sessions lifetime="28800" timeout="3600" checkAddress="false"
+ relayState="ss:mem" handlerSSL="false">
+
+ <!-- Triggers a login request directly to the TestShib IdP. -->
+ <!-- https://wiki.shibboleth.net/confluence/display/SHIB2/NativeSPServiceSSO -->
+ <SSO entityID="https://<idp-url>/idp/shibboleth" ECP="true">
+ SAML2 SAML1
+ </SSO>
+
+ <!-- SAML and local-only logout. -->
+ <!-- https://wiki.shibboleth.net/confluence/display/SHIB2/NativeSPServiceLogout -->
+ <Logout>SAML2 Local</Logout>
+
+ <!--
+ Handlers allow you to interact with the SP and gather
+ more information. Try them out!
+             Attribute values received by the SP through SAML
+ will be visible at:
+ http://<yourhosthere>/Shibboleth.sso/Session
+ -->
+
+ <!--
+ Extension service that generates "approximate" metadata
+ based on SP configuration.
+ -->
+ <Handler type="MetadataGenerator" Location="/Metadata"
+ signing="false"/>
+
+ <!-- Status reporting service. -->
+ <Handler type="Status" Location="/Status"
+ acl="127.0.0.1"/>
+
+ <!-- Session diagnostic service. -->
+ <Handler type="Session" Location="/Session"
+ showAttributeValues="true"/>
+ <!-- JSON feed of discovery information. -->
+ <Handler type="DiscoveryFeed" Location="/DiscoFeed"/>
+ </Sessions>
+
+ <!--
+ Error pages to display to yourself if
+ something goes horribly wrong.
+ -->
+            <Errors supportContact="<admin_email_address>"
+ logoLocation="/shibboleth-sp/logo.jpg"
+ styleSheet="/shibboleth-sp/main.css"/>
+
+ <!--
+ Loads and trusts a metadata file that describes only one IdP
+ and how to communicate with it.
+ -->
+ <MetadataProvider type="XML" uri="<idp-metadata-file>"
+ backingFilePath="<local idp metadata>"
+ reloadInterval="180000" />
+
+ <!-- Attribute and trust options you shouldn't need to change. -->
+ <AttributeExtractor type="XML" validate="true"
+ path="attribute-map.xml"/>
+ <AttributeResolver type="Query" subjectMatch="true"/>
+ <AttributeFilter type="XML" validate="true"
+ path="attribute-policy.xml"/>
+
+ <!--
+ Your SP generated these credentials.
+ They're used to talk to IdP's.
+ -->
+ <CredentialResolver type="File" key="sp-key.pem"
+ certificate="sp-cert.pem"/>
+
+ <ApplicationOverride id="idp_1" entityID="https://<yourhosthere>/shibboleth">
+ <Sessions lifetime="28800" timeout="3600" checkAddress="false"
+ relayState="ss:mem" handlerSSL="false">
+
+ <!-- Triggers a login request directly to the TestShib IdP. -->
+ <SSO entityID="https://<idp_1-url>/idp/shibboleth" ECP="true">
+ SAML2 SAML1
+ </SSO>
+
+ <Logout>SAML2 Local</Logout>
+ </Sessions>
+
+ <MetadataProvider type="XML" uri="<idp_1-metadata-file>"
+ backingFilePath="<local idp_1 metadata>"
+ reloadInterval="180000" />
+
+ </ApplicationOverride>
+
+ <ApplicationOverride id="idp_2" entityID="https://<yourhosthere>/shibboleth">
+ <Sessions lifetime="28800" timeout="3600" checkAddress="false"
+ relayState="ss:mem" handlerSSL="false">
+
+ <!-- Triggers a login request directly to the TestShib IdP. -->
+ <SSO entityID="https://<idp_2-url>/idp/shibboleth" ECP="true">
+ SAML2 SAML1
+ </SSO>
+
+ <Logout>SAML2 Local</Logout>
+ </Sessions>
+
+ <MetadataProvider type="XML" uri="<idp_2-metadata-file>"
+ backingFilePath="<local idp_2 metadata>"
+ reloadInterval="180000" />
+
+ </ApplicationOverride>
+
+ </ApplicationDefaults>
+
+ <!--
+ Security policies you shouldn't change unless you
+ know what you're doing.
+ -->
+ <SecurityPolicyProvider type="XML" validate="true"
+ path="security-policy.xml"/>
+
+ <!--
+ Low-level configuration about protocols and bindings
+ available for use.
+ -->
+ <ProtocolProvider type="XML" validate="true" reloadChanges="false"
+ path="protocols.xml"/>
+
+ </SPConfig>
+
+Keystone enforces `external authentication`_ when the ``REMOTE_USER``
+environment variable is present so make sure Shibboleth doesn't set the
+``REMOTE_USER`` environment variable. To do so, scan through the
+``/etc/shibboleth/shibboleth2.xml`` configuration file and remove the
+``REMOTE_USER`` directives.
+
+Examine your attribute map file ``/etc/shibboleth/attribute-map.xml`` and adjust
+your requirements if needed. For more information see
+`attributes documentation <https://wiki.shibboleth.net/confluence/display/SHIB2/NativeSPAddAttribute>`_
+
+Once you are done, restart your Shibboleth daemon:
+
+.. _`external authentication`: ../external-auth.html
+
+.. code-block:: bash
+
+ $ service shibd restart
+ $ service apache2 restart
diff --git a/keystone-moon/doc/source/federation/websso.rst b/keystone-moon/doc/source/federation/websso.rst
new file mode 100644
index 00000000..4ada0a4c
--- /dev/null
+++ b/keystone-moon/doc/source/federation/websso.rst
@@ -0,0 +1,239 @@
+:orphan:
+
+..
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+===============================
+Keystone Federation and Horizon
+===============================
+
+Keystone Changes
+================
+
+1. Update `trusted_dashboard` in keystone.conf.
+
+Specify URLs of trusted horizon servers. This value may be repeated
+multiple times. This setting ensures that keystone only sends token data back
+to trusted servers. This is performed as a precaution, specifically to
+prevent man-in-the-middle (MITM) attacks.
+
+.. code-block:: ini
+
+ [federation]
+ trusted_dashboard = http://acme.horizon.com/auth/websso/
+ trusted_dashboard = http://beta.horizon.com/auth/websso/
+
+2. Update httpd vhost file with websso information.
+
+The `/v3/auth/OS-FEDERATION/websso/<protocol>` route must be protected by the
+chosen httpd module. This is performed so the request that originates from
+horizon will use the same identity provider that is configured in keystone.
+
+If `mod_shib` is used, then use the following as an example:
+
+.. code-block:: xml
+
+ <VirtualHost *:5000>
+
+ ...
+
+ <Location ~ "/v3/auth/OS-FEDERATION/websso/saml2">
+ AuthType shibboleth
+ Require valid-user
+ ...
+ </Location>
+ </VirtualHost>
+
+If `mod_auth_openidc` is used, then use the following as an example:
+
+.. code-block:: xml
+
+ <VirtualHost *:5000>
+
+ OIDCRedirectURI http://localhost:5000/v3/auth/OS-FEDERATION/websso/redirect
+
+ ...
+
+ <Location ~ "/v3/auth/OS-FEDERATION/websso/oidc">
+ AuthType openid-connect
+ Require valid-user
+ ...
+ </Location>
+ </VirtualHost>
+
+If `mod_auth_kerb` is used, then use the following as an example:
+
+.. code-block:: xml
+
+ <VirtualHost *:5000>
+
+ ...
+
+ <Location ~ "/v3/auth/OS-FEDERATION/websso/kerberos">
+ AuthType Kerberos
+ AuthName "Acme Corporation"
+ KrbMethodNegotiate on
+ KrbMethodK5Passwd off
+ Krb5Keytab /etc/apache2/http.keytab
+ ...
+ </Location>
+ </VirtualHost>
+
+If `mod_auth_mellon` is used, then use the following as an example:
+
+.. code-block:: xml
+
+ <VirtualHost *:5000>
+
+ ...
+
+ <Location ~ "/v3/auth/OS-FEDERATION/websso/saml2">
+ AuthType Mellon
+ MellonEnable auth
+ Require valid-user
+ ...
+ </Location>
+ </VirtualHost>
+
+.. NOTE::
+ If you are also using SSO via the API, don't forget to make the Location
+ settings match your configuration used for the keystone identity provider
+ location:
+ `/v3/OS-FEDERATION/identity_providers/<idp>/protocols/<protocol>/auth`
+
+3. Update `remote_id_attribute` in keystone.conf.
+
+A remote id attribute indicates the header to retrieve from the WSGI
+environment. This header contains information about the identity
+of the identity provider. For `mod_shib` this would be
+``Shib-Identity-Provider``, for `mod_auth_openidc`, this could be
+``HTTP_OIDC_ISS``. For `mod_auth_mellon`, this could be ``MELLON_IDP``.
+
+It is recommended that this option be set on a per-protocol basis.
+
+.. code-block:: ini
+
+ [saml2]
+ remote_id_attribute = Shib-Identity-Provider
+ [oidc]
+ remote_id_attribute = HTTP_OIDC_ISS
+
+Alternatively, a generic option may be set at the `[federation]` level.
+
+.. code-block:: ini
+
+ [federation]
+ remote_id_attribute = HTTP_OIDC_ISS
+
+4. Set `remote_ids` for a keystone identity provider using the API or CLI.
+
+A keystone identity provider may have multiple `remote_ids` specified, this
+allows the same *keystone* identity provider resource to be used with multiple
+external identity providers. For example, an identity provider resource
+``university-idp``, may have the following `remote_ids`:
+``['university-x', 'university-y', 'university-z']``.
+This removes the need to configure N identity providers in keystone.
+
+This can be performed using the `OS-FEDERATION API`_:
+``PATCH /OS-FEDERATION/identity_providers/{idp_id}``
+
+Or by using the `OpenStackClient CLI`_:
+
+.. code-block:: bash
+
+ $ openstack identity provider set --remote-id <remote-id> <idp-id>
+
+.. NOTE::
+
+ Remote IDs are globally unique. Two identity providers cannot be
+ associated with the same remote ID. Once authenticated with the external
+ identity provider, keystone will determine which identity provider
+ and mapping to use based on the protocol and the value returned from the
+ `remote_id_attribute` key.
+
+ For example, if our identity provider is ``google``, the mapping used is
+ ``google_mapping`` and the protocol is ``oidc``. The identity provider's
+ remote IDs would be: [``accounts.google.com``].
+ The `remote_id_attribute` value may be set to ``HTTP_OIDC_ISS``, since
+ this value will always be ``accounts.google.com``.
+
+ The motivation for this approach is that there will always be some data
+ sent by the identity provider (in the assertion or claim) that uniquely
+ identifies the identity provider. This removes the requirement for horizon
+ to list all the identity providers that are trusted by keystone.
+
+.. _`OpenStackClient CLI`: http://docs.openstack.org/developer/python-openstackclient/command-objects/identity-provider.html#identity-provider-set
+.. _`OS-FEDERATION API`: http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-federation-ext.html#update-identity-provider
+
+Horizon Changes
+===============
+
+.. NOTE::
+
+ Django OpenStack Auth version 1.2.0 or higher is required for these steps.
+
+1. Set the Identity Service version to 3
+
+Ensure the `OPENSTACK_API_VERSIONS` option in horizon's local_settings.py has
+been updated to indicate that the `identity` version to use is `3`.
+
+.. code-block:: python
+
+ OPENSTACK_API_VERSIONS = {
+ "identity": 3,
+ }
+
+2. Authenticate against Identity Server v3.
+
+Ensure the `OPENSTACK_KEYSTONE_URL` option in horizon's local_settings.py has
+been updated to point to a v3 URL.
+
+.. code-block:: python
+
+ OPENSTACK_KEYSTONE_URL = "http://localhost:5000/v3"
+
+3. Set the `WEBSSO_ENABLED` option.
+
+Ensure the `WEBSSO_ENABLED` option is set to True in horizon's local_settings.py file,
+this will provide users with an updated login screen for horizon.
+
+.. code-block:: python
+
+ WEBSSO_ENABLED = True
+
+4. (Optional) Create a list of authentication methods with the
+ `WEBSSO_CHOICES` option.
+
+Within horizon's settings.py file, a list of supported authentication methods
+can be specified. The entries in the list map to keystone federation protocols,
+with the exception of ``credentials`` which is reserved by horizon, and maps to
+the user name and password used by keystone's identity backend.
+
+.. code-block:: python
+
+ WEBSSO_CHOICES = (
+ ("credentials", _("Keystone Credentials")),
+ ("oidc", _("OpenID Connect")),
+ ("saml2", _("Security Assertion Markup Language"))
+ )
+
+5. (Optional) Specify an initial choice with the `WEBSSO_INITIAL_CHOICE`
+ option.
+
+The list set by the `WEBSSO_CHOICES` option will be generated in a drop-down
+menu in the login screen. The setting `WEBSSO_INITIAL_CHOICE` will
+automatically set that choice to be highlighted by default.
+
+.. code-block:: python
+
+ WEBSSO_INITIAL_CHOICE = "credentials"
diff --git a/keystone-moon/doc/source/http-api.rst b/keystone-moon/doc/source/http-api.rst
index a104ce3f..a31b5e69 100644
--- a/keystone-moon/doc/source/http-api.rst
+++ b/keystone-moon/doc/source/http-api.rst
@@ -26,8 +26,8 @@ The original source of truth for the v2.0 API is defined by a set of WADL and
XSD files. The original source of truth for the v3 API is defined by
documentation.
-.. _`Identity API v2.0`: https://github.com/openstack/identity-api/tree/master/v2.0/src
-.. _`Identity API v3`: https://github.com/openstack/identity-api/tree/master/v3/src/markdown
+.. _`Identity API v2.0`: http://specs.openstack.org/openstack/keystone-specs/#v2-0-api
+.. _`Identity API v3`: http://specs.openstack.org/openstack/keystone-specs/#v3-api
History
=======
@@ -125,7 +125,7 @@ Keystone clients can use to automatically detect available API versions.
With unversioned ``identity`` endpoints in the service catalog, you should be
able to `authenticate with keystoneclient`_ successfully.
-.. _`latest sample configuration`: https://github.com/openstack/keystone/blob/master/etc/keystone-paste.ini
+.. _`latest sample configuration`: https://git.openstack.org/cgit/openstack/keystone/tree/etc/keystone-paste.ini
.. _`authenticate with keystoneclient`: http://docs.openstack.org/developer/python-keystoneclient/using-api-v3.html#authenticating
I have a Python client
@@ -148,7 +148,7 @@ I have a non-Python client
You'll likely need to heavily reference our `API documentation`_ to port your
application to Identity API v3.
-.. _`API documentation`: https://github.com/openstack/identity-api/blob/master/v3/src/markdown/identity-api-v3.md
+.. _`API documentation`: https://git.openstack.org/cgit/openstack-attic/identity-api/tree/v3/src/markdown/identity-api-v3.md
The most common operation would be password-based authentication including a
tenant name (i.e. project name) to specify an authorization scope. In Identity
diff --git a/keystone-moon/doc/source/index.rst b/keystone-moon/doc/source/index.rst
index 48129a80..c77d7738 100644
--- a/keystone-moon/doc/source/index.rst
+++ b/keystone-moon/doc/source/index.rst
@@ -50,7 +50,10 @@ Getting Started
setup
installing
configuration
+ policy_mapping
configure_federation
+ mapping_combinations
+ mapping_schema
configuringservices
extensions
key_terms
diff --git a/keystone-moon/doc/source/installing.rst b/keystone-moon/doc/source/installing.rst
index 0492da7b..e38663b7 100644
--- a/keystone-moon/doc/source/installing.rst
+++ b/keystone-moon/doc/source/installing.rst
@@ -42,7 +42,7 @@ Clone the Keystone repository:
.. code-block:: bash
- $ git clone http://github.com/openstack/keystone.git
+ $ git clone https://git.openstack.org/openstack/keystone.git
$ cd keystone
Install the Keystone web service:
@@ -82,12 +82,12 @@ An excellent reference implementation of setting up Keystone is DEVSTACK_,
most commonly used for development and testing setup of not only Keystone,
but all of the core OpenStack projects.
-.. _DEVSTACK: http://devstack.org/
+.. _DEVSTACK: http://docs.openstack.org/developer/devstack/
The script with the latest examples of initializing data in Keystone is a
-bash script called keystone_data.sh_
+bash script called `lib/keystone`_
-.. _keystone_data.sh: https://github.com/openstack-dev/devstack/blob/master/files/keystone_data.sh
+.. _lib/keystone: https://git.openstack.org/cgit/openstack-dev/devstack/tree/lib/keystone
Installing from packages: Ubuntu
--------------------------------
@@ -111,15 +111,16 @@ find described in :doc:`configuringservices`.
Installing from packages: Fedora
--------------------------------
-Installing Keystone with Fedora 17 is documented at
-http://fedoraproject.org/wiki/Getting_started_with_OpenStack_on_Fedora_17.
+To install Keystone on Fedora refer to the steps found in the `OpenStack
+Install Guide`_.
To install the packages:
.. code-block:: bash
- $ sudo yum install --enablerepo=updates-testing openstack-keystone
+ $ sudo yum install openstack-keystone
-Once installed, you can configure Keystone based on the instructions at:
+Once installed, you still need to initialize data in Keystone, which you can
+find described in :doc:`configuringservices`.
-http://fedoraproject.org/wiki/Getting_started_with_OpenStack_on_Fedora_17#Configuring_Keystone_for_authentication
+.. _`OpenStack Install Guide`: http://docs.openstack.org/juno/install-guide/install/yum/content/keystone-install.html
diff --git a/keystone-moon/doc/source/man/keystone-all.rst b/keystone-moon/doc/source/man/keystone-all.rst
index 328b0c4e..ea958fe0 100644
--- a/keystone-moon/doc/source/man/keystone-all.rst
+++ b/keystone-moon/doc/source/man/keystone-all.rst
@@ -108,5 +108,5 @@ SEE ALSO
SOURCE
======
-* Keystone source is managed in GitHub `Keystone <http://github.com/openstack/keystone>`__
+* Keystone source is managed in Gerrit git `Keystone <https://git.openstack.org/cgit/openstack/keystone>`__
* Keystone bugs are managed at Launchpad `Keystone <https://bugs.launchpad.net/keystone>`__
diff --git a/keystone-moon/doc/source/man/keystone-manage.rst b/keystone-moon/doc/source/man/keystone-manage.rst
index b2ea3924..21a3ca4a 100644
--- a/keystone-moon/doc/source/man/keystone-manage.rst
+++ b/keystone-moon/doc/source/man/keystone-manage.rst
@@ -7,9 +7,9 @@ Keystone Management Utility
---------------------------
:Author: openstack@lists.openstack.org
-:Date: 2014-10-16
+:Date: 2015-4-7
:Copyright: OpenStack Foundation
-:Version: 2014.2
+:Version: 2015.1
:Manual section: 1
:Manual group: cloud computing
@@ -42,7 +42,11 @@ Available commands:
* ``db_sync``: Sync the database.
* ``db_version``: Print the current migration version of the database.
+* ``domain_config_upload``: Upload domain configuration file.
+* ``fernet_rotate``: Rotate keys in the Fernet key repository.
+* ``fernet_setup``: Setup a Fernet key repository.
* ``mapping_purge``: Purge the identity mapping table.
+* ``mapping_engine``: Test your federation mapping rules.
* ``pki_setup``: Initialize the certificates used to sign tokens.
* ``saml_idp_metadata``: Generate identity provider metadata.
* ``ssl_setup``: Generate certificates for SSL.
@@ -121,5 +125,5 @@ SEE ALSO
SOURCE
======
-* Keystone is sourced in GitHub `Keystone <http://github.com/openstack/keystone>`__
+* Keystone is sourced in Gerrit git `Keystone <https://git.openstack.org/cgit/openstack/keystone>`__
* Keystone bugs are managed at Launchpad `Keystone <https://bugs.launchpad.net/keystone>`__
diff --git a/keystone-moon/doc/source/mapping_combinations.rst b/keystone-moon/doc/source/mapping_combinations.rst
new file mode 100644
index 00000000..9aa411ad
--- /dev/null
+++ b/keystone-moon/doc/source/mapping_combinations.rst
@@ -0,0 +1,597 @@
+..
+ Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ use this file except in compliance with the License. You may obtain a copy
+ of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+===================================
+Mapping Combinations for Federation
+===================================
+
+-----------
+Description
+-----------
+
+Mapping adds a set of rules to map federation attributes to Keystone users and/or
+groups. An Identity Provider has exactly one mapping specified per protocol.
+
+Mapping objects can be used multiple times by different combinations of Identity
+Provider and Protocol.
+
+-----------
+Definitions
+-----------
+
+A rule hierarchy looks as follows:
+
+.. code-block:: javascript
+
+ {
+ "rules": [
+ {
+ "local": [
+ {
+ "<user> or <group>"
+ }
+ ],
+ "remote": [
+ {
+ "<condition>"
+ }
+ ]
+ }
+ ]
+ }
+
+* `rules`: top-level list of rules.
+* `local`: a rule containing information on what local attributes will be mapped.
+* `remote`: a rule containing information on what remote attributes will be mapped.
+* `<condition>`: contains information on conditions that allow a rule, can only
+ be set in a `remote` rule.
+
+-------------
+Mapping Rules
+-------------
+
+Mapping Engine
+--------------
+
+The mapping engine can be tested before creating a federated setup. It can be
+tested with the ``keystone-manage mapping_engine`` command:
+
+.. code-block:: bash
+
+ $ keystone-manage mapping_engine --rules <file> --input <file>
+
+Mapping Conditions
+------------------
+
+Mappings support 5 different types of conditions:
+
+``empty``: The rule is matched to all claims containing the remote attribute type.
+This condition does not need to be specified.
+
+``any_one_of``: The rule is matched only if any of the specified strings appear
+in the remote attribute type. Condition result is boolean, not the argument that
+is passed as input.
+
+``not_any_of``: The rule is not matched if any of the specified strings appear
+in the remote attribute type. Condition result is boolean, not the argument that
+is passed as input.
+
+``blacklist``: The rule allows all except a specified set of groups. Condition
+result is the argument(s) passed as input minus what was matched in the
+blacklist.
+
+``whitelist``: The rule allows a specified set of groups. Condition result is
+the subset of the argument(s) passed as input that is also present in the whitelist.
+
+.. NOTE::
+
+ ``empty``, ``blacklist`` and ``whitelist`` are the only conditions that can
+ be used in direct mapping ({0}, {1}, etc.)
+
+You can combine multiple conditions in a single rule. The schema that needs to be
+followed for the mapping rules can be seen in the :doc:`mapping_schema` page.
+
+Mappings Examples
+-----------------
+
+The following are all examples of mapping rule types.
+
+empty condition
+~~~~~~~~~~~~~~~
+
+.. code-block:: javascript
+
+ {
+ "rules": [
+ {
+ "local": [
+ {
+ "user": {
+ "name": "{0} {1}",
+ "email": "{2}"
+ },
+ "group": {
+ "name": "{3}"
+ }
+ }
+ ],
+ "remote": [
+ {
+ "type": "FirstName"
+ },
+ {
+ "type": "LastName"
+ },
+ {
+ "type": "Email"
+ },
+ {
+ "type": "OIDC_GROUPS"
+ }
+ ]
+ }
+ ]
+ }
+
+.. NOTE::
+
+ The numbers in braces {} are indices, they map in order. For example::
+
+ - Mapping to user with the name matching the value in remote attribute FirstName
+ - Mapping to user with the name matching the value in remote attribute LastName
+ - Mapping to user with the email matching value in remote attribute Email
+ - Mapping to a group(s) with the name matching the value(s) in remote attribute OIDC_GROUPS
+
+
+
+Groups can have multiple values. Each value must be separated by a `;`.
+For example: OIDC_GROUPS=developers;testers
+
+
+other conditions
+~~~~~~~~~~~~~~~~
+
+In ``<other_condition>`` shown below, please supply one of the following:
+``any_one_of``, or ``not_any_of``.
+
+.. code-block:: javascript
+
+ {
+ "rules": [
+ {
+ "local": [
+ {
+ "user": {
+ "name": "{0}"
+ },
+ "group": {
+ "id": "0cd5e9"
+ }
+ }
+ ],
+ "remote": [
+ {
+ "type": "UserName"
+ },
+ {
+ "type": "HTTP_OIDC_GROUPIDS",
+ "<other_condition>": [
+ "HTTP_OIDC_EMAIL"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+
+In ``<other_condition>`` shown below, please supply one of the following:
+``blacklist``, or ``whitelist``.
+
+.. code-block:: javascript
+
+ {
+ "rules": [
+ {
+ "local": [
+ {
+ "user": {
+ "name": "{0}"
+ },
+ "groups": {
+ "name": "{1}",
+ "domain": {
+ "id": "0cd5e9"
+ }
+ }
+ }
+ ],
+ "remote": [
+ {
+ "type": "UserName"
+ },
+ {
+ "type": "HTTP_OIDC_GROUPIDS",
+ "<other_condition>": [
+ "me@example.com"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+
+.. NOTE::
+
+ If the user id and name are not specified in the mapping, the server tries to
+ directly map ``REMOTE_USER`` environment variable. If this variable is also
+ unavailable the server returns an HTTP 401 Unauthorized error.
+
+Group ids and names can be provided in the local section:
+
+.. code-block:: javascript
+
+ {
+ "local": [
+ {
+ "group": {
+ "id":"0cd5e9"
+ }
+ }
+ ]
+ }
+
+.. code-block:: javascript
+
+ {
+ "local": [
+ {
+ "group": {
+ "name": "developer_group",
+ "domain": {
+ "id": "abc1234"
+ }
+ }
+ }
+ ]
+ }
+
+.. code-block:: javascript
+
+ {
+ "local": [
+ {
+ "group": {
+ "name": "developer_group",
+ "domain": {
+ "name": "private_cloud"
+ }
+ }
+ }
+ ]
+ }
+
+
+Output
+------
+
+If a mapping is valid you will receive the following output:
+
+.. code-block:: javascript
+
+ {
+ "group_ids": "[<group-ids>]",
+ "user":
+ {
+ "domain":
+ {
+ "id": "Federated" or "<local-domain-id>"
+ },
+ "type": "ephemeral" or "local",
+ "name": "<local-user-name>",
+ "id": "<local-user-id>"
+ },
+ "group_names":
+ [
+ {
+ "domain":
+ {
+ "name": "<domain-name>"
+ },
+ "name":
+ {
+ "name": "[<groups-names>]"
+ }
+ }
+ {
+ "domain":
+ {
+ "name": "<domain-name>"
+ },
+ "name":
+ {
+ "name": "[<groups-names>]"
+ }
+ }
+ ]
+ }
+
+The ``type`` parameter specifies the type of user being mapped. The 2 possible
+user types are ``local`` and ``ephemeral``. ``local`` is displayed if the user
+has a domain specified. The user is treated as existing in the backend, hence
+the server will fetch user details (id, name, roles, groups). ``ephemeral`` is
+displayed for a user that does not exist in the backend.
+
+The ``id`` parameter in the service domain specifies the domain a user belongs
+to. ``Federated`` will be displayed if no domain is specified in the local rule.
+User is deemed ephemeral and becomes a member of service domain named ``Federated``.
+If the domain is specified the local domain's id will be displayed.
+If the mapped user is local, mapping engine will discard further group
+assigning and return set of roles configured for the user.
+
+.. NOTE::
+ Domain ``Federated`` is a service domain - it cannot be listed, displayed,
+ added or deleted. There is no need to perform any operation on it prior to
+ federation configuration.
+
+Regular Expressions
+-------------------
+
+Regular expressions can be used in a mapping by specifying the ``regex`` key, and
+setting it to ``true``.
+
+.. code-block:: javascript
+
+ {
+ "rules": [
+ {
+ "local": [
+ {
+ "user": {
+ "name": "{0}"
+ },
+ "group": {
+ "id": "0cd5e9"
+ }
+ },
+ ],
+ "remote": [
+ {
+ "type": "UserName"
+ },
+ {
+ "type": "HTTP_OIDC_GROUPIDS",
+                "any_one_of": [
+                    ".*@yeah.com$"
+                ],
+                "regex": true
+ }
+ ]
+ }
+ ]
+ }
+
+This allows any user with a claim containing a key with any value in
+``HTTP_OIDC_GROUPIDS`` to be mapped to group with id ``0cd5e9``.
+
+Condition Combinations
+----------------------
+
+Combinations of mappings conditions can also be done.
+
+``empty``, ``any_one_of``, and ``not_any_of`` can all be used in the same rule,
+but cannot be repeated within the same condition. ``any_one_of`` and
+``not_any_of`` are mutually exclusive within a condition's scope. So are
+``whitelist`` and ``blacklist``.
+
+.. code-block:: javascript
+
+ {
+ "rules": [
+ {
+ "local": [
+ {
+ "user": {
+ "name": "{0}"
+ },
+ "group": {
+ "id": "0cd5e9"
+ }
+ },
+ ],
+ "remote": [
+ {
+ "type": "UserName"
+ },
+ {
+ "type": "cn=IBM_Canada_Lab",
+ "not_any_of": [
+ ".*@naww.com$"
+ ],
+ "regex": true
+ },
+ {
+ "type": "cn=IBM_USA_Lab",
+                "any_one_of": [
+                    ".*@yeah.com$"
+                ],
+                "regex": true
+ }
+ ]
+ }
+ ]
+ }
+
+As before group names and users can also be provided in the local section.
+
+This allows any user with the following claim information to be mapped to
+group with id 0cd5e9.
+
+.. code-block:: javascript
+
+ {"UserName":"<any_name>@yeah.com"}
+ {"cn=IBM_USA_Lab":"<any_name>@yeah.com"}
+ {"cn=IBM_Canada_Lab":"<any_name>@yeah.com"}
+
+The following claims will be mapped:
+
+- any claim containing the key UserName.
+- any claim containing key cn=IBM_Canada_Lab that doesn't have the value <any_name>@naww.com.
+- any claim containing key cn=IBM_USA_Lab that has value <any_name>@yeah.com.
+
+Multiple Rules
+--------------
+
+Multiple rules can also be utilized in a mapping.
+
+.. code-block:: javascript
+
+ {
+ "rules": [
+ {
+ "local": [
+ {
+ "user": {
+ "name": "{0}"
+ },
+ "group": {
+ "name": "non-contractors",
+ "domain": {
+ "id": "abc1234"
+ }
+ }
+ }
+ ],
+ "remote": [
+ {
+ "type": "UserName"
+ },
+ {
+ "type": "orgPersonType",
+ "not_any_of": [
+ "Contractor",
+ "SubContractor"
+ ]
+ }
+ ]
+ },
+ {
+ "local": [
+ {
+ "user": {
+ "name": "{0}"
+ },
+ "group": {
+ "name": "contractors",
+ "domain": {
+ "id": "abc1234"
+ }
+ }
+ }
+ ],
+ "remote": [
+ {
+ "type": "UserName"
+ },
+ {
+ "type": "orgPersonType",
+ "any_one_of": [
+ "Contractor",
+ "SubContractor"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+
+
+The above assigns group membership based on ``orgPersonType`` values:
+
+- users whose ``orgPersonType`` is neither ``Contractor`` nor ``SubContractor`` will belong to the ``non-contractors`` group.
+- users whose ``orgPersonType`` is either ``Contractor`` or ``SubContractor`` will belong to the ``contractors`` group.
+
+Rules are additive, so permissions will only be granted for the rules that
+succeed. All the remote conditions of a rule must be valid.
+
+When using multiple rules you can specify more than one effective user
+identification, but only the first match (rules are evaluated from top to
+bottom) will be considered and the others ignored.
+
+Since rules are additive one can specify one user identification and this will
+also work. The best practice for multiple rules is to create a rule for just
+user and another rule for just groups. Below is rules example repeated but with
+global username mapping.
+
+
+.. code-block:: javascript
+
+ {
+ "rules": [
+ {
+            "local": [
+                {
+                    "user": {
+                        "id": "{0}"
+                    }
+                }
+            ],
+ "remote": [
+ {
+ "type": "UserType"
+ }
+ ]
+ },
+ {
+ "local": [
+ {
+ "group": {
+ "name": "non-contractors",
+ "domain": {
+ "id": "abc1234"
+ }
+ }
+ }
+ ],
+ "remote": [
+ {
+ "type": "orgPersonType",
+ "not_any_of": [
+ "Contractor",
+ "SubContractor"
+ ]
+ }
+ ]
+ },
+ {
+ "local": [
+ {
+ "group": {
+ "name": "contractors",
+ "domain": {
+ "id": "abc1234"
+ }
+ }
+ }
+ ],
+ "remote": [
+ {
+ "type": "orgPersonType",
+ "any_one_of": [
+ "Contractor",
+ "SubContractor"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+
+
diff --git a/keystone-moon/doc/source/mapping_schema.rst b/keystone-moon/doc/source/mapping_schema.rst
new file mode 100644
index 00000000..a020178b
--- /dev/null
+++ b/keystone-moon/doc/source/mapping_schema.rst
@@ -0,0 +1,160 @@
+..
+ Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ use this file except in compliance with the License. You may obtain a copy
+ of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+=============================
+Mapping Schema for Federation
+=============================
+
+Description
+-----------
+
+The schema for mapping is a description of how a mapping should be created.
+It shows all the requirements and possibilities for a JSON to be used for mapping.
+
+Mapping schema is validated with `JSON Schema
+<http://json-schema.org/documentation.html>`__
+
+Mapping Schema
+--------------
+
+The rules supported must use the following schema:
+
+.. code-block:: javascript
+
+ {
+ "type": "object",
+ "required": ['rules'],
+ "properties": {
+ "rules": {
+ "minItems": 1,
+ "type": "array",
+ "items": {
+ "type": "object",
+ "required": ['local', 'remote'],
+ "additionalProperties": False,
+ "properties": {
+ "local": {
+ "type": "array"
+ },
+ "remote": {
+ "minItems": 1,
+ "type": "array",
+ "items": {
+ "type": "object",
+ "oneOf": [
+ {"$ref": "#/definitions/empty"},
+ {"$ref": "#/definitions/any_one_of"},
+ {"$ref": "#/definitions/not_any_of"},
+ {"$ref": "#/definitions/blacklist"},
+ {"$ref": "#/definitions/whitelist"}
+ ],
+ }
+ }
+ }
+ }
+ }
+ },
+ "definitions": {
+ "empty": {
+ "type": "object",
+ "required": ['type'],
+ "properties": {
+ "type": {
+ "type": "string"
+ },
+ },
+ "additionalProperties": False,
+ },
+ "any_one_of": {
+ "type": "object",
+ "additionalProperties": False,
+ "required": ['type', 'any_one_of'],
+ "properties": {
+ "type": {
+ "type": "string"
+ },
+ "any_one_of": {
+ "type": "array"
+ },
+ "regex": {
+ "type": "boolean"
+ }
+ }
+ },
+ "not_any_of": {
+ "type": "object",
+ "additionalProperties": False,
+ "required": ['type', 'not_any_of'],
+ "properties": {
+ "type": {
+ "type": "string"
+ },
+ "not_any_of": {
+ "type": "array"
+ },
+ "regex": {
+ "type": "boolean"
+ }
+ }
+ },
+ "blacklist": {
+ "type": "object",
+ "additionalProperties": False,
+ "required": ['type', 'blacklist'],
+ "properties": {
+ "type": {
+ "type": "string"
+ },
+ "blacklist": {
+ "type": "array"
+ }
+ }
+ },
+ "whitelist": {
+ "type": "object",
+ "additionalProperties": False,
+ "required": ['type', 'whitelist'],
+ "properties": {
+ "type": {
+ "type": "string"
+ },
+ "whitelist": {
+ "type": "array"
+ }
+ }
+ }
+ }
+ }
+
+.. NOTE::
+
+   ``"additionalProperties": False`` indicates that only the properties shown may be present.
+
+ .. code-block:: javascript
+
+ "whitelist": {
+ "type": "object",
+ "additionalProperties": False,
+ "required": ['type', 'whitelist'],
+ "properties": {
+ "type": {
+ "type": "string"
+ },
+ "whitelist": {
+ "type": "array"
+ }
+ }
+ }
+
+ Keystone will not accept any other keys in the JSON mapping other than ``type``, and
+ ``whitelist``.
diff --git a/keystone-moon/doc/source/policy_mapping.rst b/keystone-moon/doc/source/policy_mapping.rst
new file mode 100644
index 00000000..9b11efd6
--- /dev/null
+++ b/keystone-moon/doc/source/policy_mapping.rst
@@ -0,0 +1,213 @@
+===============================
+Mapping of policy target to API
+===============================
+
+The following table shows the target in the policy.json file for each API.
+
+========================================================= ===
+Target API
+========================================================= ===
+identity:get_region GET /v3/regions/{region_id}
+identity:list_regions GET /v3/regions
+identity:create_region POST /v3/regions
+identity:update_region PATCH /v3/regions/{region_id}
+identity:delete_region DELETE /v3/regions/{region_id}
+
+identity:get_service GET /v3/services/{service_id}
+identity:list_services GET /v3/services
+identity:create_service POST /v3/services
+identity:update_service                                   PATCH /v3/services/{service_id}
+identity:delete_service                                   DELETE /v3/services/{service_id}
+
+identity:get_endpoint GET /v3/endpoints/{endpoint_id}
+identity:list_endpoints GET /v3/endpoints
+identity:create_endpoint POST /v3/endpoints
+identity:update_endpoint PATCH /v3/endpoints/{endpoint_id}
+identity:delete_endpoint DELETE /v3/endpoints/{endpoint_id}
+
+identity:get_domain GET /v3/domains/{domain_id}
+identity:list_domains GET /v3/domains
+identity:create_domain POST /v3/domains
+identity:update_domain PATCH /v3/domains/{domain_id}
+identity:delete_domain DELETE /v3/domains/{domain_id}
+
+identity:get_project GET /v3/projects/{project_id}
+identity:list_projects GET /v3/projects
+identity:list_user_projects GET /v3/users/{user_id}/projects
+identity:create_project POST /v3/projects
+identity:update_project PATCH /v3/projects/{project_id}
+identity:delete_project DELETE /v3/projects/{project_id}
+
+identity:get_user GET /v3/users/{user_id}
+identity:list_users GET /v3/users
+identity:create_user POST /v3/users
+identity:update_user PATCH /v3/users/{user_id}
+identity:delete_user DELETE /v3/users/{user_id}
+identity:change_password POST /v3/users/{user_id}/password
+
+identity:get_group GET /v3/groups/{group_id}
+identity:list_groups GET /v3/groups
+identity:list_groups_for_user GET /v3/users/{user_id}/groups
+identity:create_group POST /v3/groups
+identity:update_group PATCH /v3/groups/{group_id}
+identity:delete_group DELETE /v3/groups/{group_id}
+identity:list_users_in_group GET /v3/groups/{group_id}/users
+identity:remove_user_from_group DELETE /v3/groups/{group_id}/users/{user_id}
+identity:check_user_in_group GET /v3/groups/{group_id}/users/{user_id}
+identity:add_user_to_group PUT /v3/groups/{group_id}/users/{user_id}
+
+identity:get_credential GET /v3/credentials/{credential_id}
+identity:list_credentials GET /v3/credentials
+identity:create_credential POST /v3/credentials
+identity:update_credential PATCH /v3/credentials/{credential_id}
+identity:delete_credential DELETE /v3/credentials/{credential_id}
+
+identity:ec2_get_credential GET /v3/users/{user_id}/credentials/OS-EC2/{credential_id}
+identity:ec2_list_credentials GET /v3/users/{user_id}/credentials/OS-EC2
+identity:ec2_create_credential POST /v3/users/{user_id}/credentials/OS-EC2
+identity:ec2_delete_credential DELETE /v3/users/{user_id}/credentials/OS-EC2/{credential_id}
+
+identity:get_role GET /v3/roles/{role_id}
+identity:list_roles GET /v3/roles
+identity:create_role POST /v3/roles
+identity:update_role PATCH /v3/roles/{role_id}
+identity:delete_role DELETE /v3/roles/{role_id}
+
+identity:check_grant GET `grant_resources`_
+identity:list_grants GET `grant_collections`_
+identity:create_grant PUT `grant_resources`_
+identity:revoke_grant DELETE `grant_resources`_
+
+identity:list_role_assignments GET /v3/role_assignments
+
+identity:get_policy GET /v3/policy/{policy_id}
+identity:list_policies GET /v3/policy
+identity:create_policy POST /v3/policy
+identity:update_policy PATCH /v3/policy/{policy_id}
+identity:delete_policy DELETE /v3/policy/{policy_id}
+
+identity:check_token HEAD /v3/auth/tokens
+identity:validate_token - GET /v2.0/tokens/{token_id}
+ - GET /v3/auth/tokens
+identity:validate_token_head HEAD /v2.0/tokens/{token_id}
+identity:revocation_list - GET /v2.0/tokens/revoked
+ - GET /v3/auth/tokens/OS-PKI/revoked
+identity:revoke_token DELETE /v3/auth/tokens
+identity:create_trust POST /v3/OS-TRUST/trusts
+identity:list_trusts GET /v3/OS-TRUST/trusts
+identity:list_roles_for_trust GET /v3/OS-TRUST/trusts/{trust_id}/roles
+identity:get_role_for_trust GET /v3/OS-TRUST/trusts/{trust_id}/roles/{role_id}
+identity:delete_trust DELETE /v3/OS-TRUST/trusts/{trust_id}
+
+identity:create_consumer POST /v3/OS-OAUTH1/consumers
+identity:get_consumer GET /v3/OS-OAUTH1/consumers/{consumer_id}
+identity:list_consumers GET /v3/OS-OAUTH1/consumers
+identity:delete_consumer DELETE /v3/OS-OAUTH1/consumers/{consumer_id}
+identity:update_consumer PATCH /v3/OS-OAUTH1/consumers/{consumer_id}
+
+identity:authorize_request_token PUT /v3/OS-OAUTH1/authorize/{request_token_id}
+identity:list_access_token_roles GET /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/roles
+identity:get_access_token_role GET /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/roles/{role_id}
+identity:list_access_tokens GET /v3/users/{user_id}/OS-OAUTH1/access_tokens
+identity:get_access_token GET /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}
+identity:delete_access_token DELETE /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}
+
+identity:list_projects_for_endpoint GET /v3/OS-EP-FILTER/endpoints/{endpoint_id}/projects
+identity:add_endpoint_to_project PUT /v3/OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+identity:check_endpoint_in_project GET /v3/OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+identity:list_endpoints_for_project GET /v3/OS-EP-FILTER/projects/{project_id}/endpoints
+identity:remove_endpoint_from_project DELETE /v3/OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+
+identity:create_endpoint_group POST /v3/OS-EP-FILTER/endpoint_groups
+identity:list_endpoint_groups GET /v3/OS-EP-FILTER/endpoint_groups
+identity:get_endpoint_group GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
+identity:update_endpoint_group PATCH /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
+identity:delete_endpoint_group DELETE /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
+identity:list_projects_associated_with_endpoint_group GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects
+identity:list_endpoints_associated_with_endpoint_group GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/endpoints
+identity:get_endpoint_group_in_project GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/{project_id}
+identity:list_endpoint_groups_for_project GET /v3/OS-EP-FILTER/projects/{project_id}/endpoint_groups
+identity:add_endpoint_group_to_project PUT /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/{project_id}
+identity:remove_endpoint_group_from_project DELETE /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/{project_id}
+
+identity:create_identity_provider PUT /v3/OS-FEDERATION/identity_providers/{idp_id}
+identity:list_identity_providers GET /v3/OS-FEDERATION/identity_providers
+identity:get_identity_providers GET /v3/OS-FEDERATION/identity_providers/{idp_id}
+identity:update_identity_provider PATCH /v3/OS-FEDERATION/identity_providers/{idp_id}
+identity:delete_identity_provider DELETE /v3/OS-FEDERATION/identity_providers/{idp_id}
+
+identity:create_protocol PUT /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id}
+identity:update_protocol PATCH /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id}
+identity:get_protocol GET /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id}
+identity:list_protocols GET /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols
+identity:delete_protocol DELETE /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id}
+
+identity:create_mapping PUT /v3/OS-FEDERATION/mappings/{mapping_id}
+identity:get_mapping GET /v3/OS-FEDERATION/mappings/{mapping_id}
+identity:list_mappings GET /v3/OS-FEDERATION/mappings
+identity:delete_mapping DELETE /v3/OS-FEDERATION/mappings/{mapping_id}
+identity:update_mapping PATCH /v3/OS-FEDERATION/mappings/{mapping_id}
+
+identity:create_service_provider PUT /v3/OS-FEDERATION/service_providers/{sp_id}
+identity:list_service_providers GET /v3/OS-FEDERATION/service_providers
+identity:get_service_provider GET /v3/OS-FEDERATION/service_providers/{sp_id}
+identity:update_service_provider PATCH /v3/OS-FEDERATION/service_providers/{sp_id}
+identity:delete_service_provider DELETE /v3/OS-FEDERATION/service_providers/{sp_id}
+
+identity:get_auth_catalog GET /v3/auth/catalog
+identity:get_auth_projects GET /v3/auth/projects
+identity:get_auth_domains GET /v3/auth/domains
+
+identity:list_projects_for_groups GET /v3/OS-FEDERATION/projects
+identity:list_domains_for_groups GET /v3/OS-FEDERATION/domains
+
+identity:list_revoke_events GET /v3/OS-REVOKE/events
+
+identity:create_policy_association_for_endpoint PUT /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints/{endpoint_id}
+identity:check_policy_association_for_endpoint GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints/{endpoint_id}
+identity:delete_policy_association_for_endpoint DELETE /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints/{endpoint_id}
+identity:create_policy_association_for_service PUT /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}
+identity:check_policy_association_for_service GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}
+identity:delete_policy_association_for_service DELETE /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}
+identity:create_policy_association_for_region_and_service PUT /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}/regions/{region_id}
+identity:check_policy_association_for_region_and_service GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}/regions/{region_id}
+identity:delete_policy_association_for_region_and_service DELETE /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}/regions/{region_id}
+identity:get_policy_for_endpoint GET /v3/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/policy
+identity:list_endpoints_for_policy GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints
+
+identity:create_domain_config PUT /v3/domains/{domain_id}/config
+identity:get_domain_config - GET /v3/domains/{domain_id}/config
+ - GET /v3/domains/{domain_id}/config/{group}
+ - GET /v3/domains/{domain_id}/config/{group}/{option}
+identity:update_domain_config - PATCH /v3/domains/{domain_id}/config
+ - PATCH /v3/domains/{domain_id}/config/{group}
+ - PATCH /v3/domains/{domain_id}/config/{group}/{option}
+identity:delete_domain_config - DELETE /v3/domains/{domain_id}/config
+ - DELETE /v3/domains/{domain_id}/config/{group}
+ - DELETE /v3/domains/{domain_id}/config/{group}/{option}
+
+========================================================= ===
+
+.. _grant_resources:
+
+*grant_resources* are:
+
+- /v3/projects/{project_id}/users/{user_id}/roles/{role_id}
+- /v3/projects/{project_id}/groups/{group_id}/roles/{role_id}
+- /v3/domains/{domain_id}/users/{user_id}/roles/{role_id}
+- /v3/domains/{domain_id}/groups/{group_id}/roles/{role_id}
+- /v3/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/{role_id}/inherited_to_projects
+- /v3/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects
+- /v3/OS-INHERIT/projects/{project_id}/users/{user_id}/roles/{role_id}/inherited_to_projects
+- /v3/OS-INHERIT/projects/{project_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects
+
+.. _grant_collections:
+
+*grant_collections* are:
+
+- /v3/projects/{project_id}/users/{user_id}/roles
+- /v3/projects/{project_id}/groups/{group_id}/roles
+- /v3/domains/{domain_id}/users/{user_id}/roles
+- /v3/domains/{domain_id}/groups/{group_id}/roles
+- /v3/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/inherited_to_projects
+- /v3/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/inherited_to_projects
diff --git a/keystone-moon/doc/source/setup.rst b/keystone-moon/doc/source/setup.rst
index f919dccc..d1ce8f4c 100644
--- a/keystone-moon/doc/source/setup.rst
+++ b/keystone-moon/doc/source/setup.rst
@@ -18,27 +18,23 @@
Setting up a Keystone development environment
=============================================
-This document describes getting the source from keystone's `GitHub repository`_
+This document describes getting the source from keystone's `Git repository`_
for development purposes.
To install Keystone from packaging, refer instead to Keystone's `User
Documentation`_.
-.. _`GitHub Repository`: http://github.com/openstack/keystone
+.. _`Git Repository`: http://git.openstack.org/cgit/openstack/keystone
.. _`User Documentation`: http://docs.openstack.org/
Prerequisites
=============
-This document assumes you are using:
-
-- Ubuntu, Fedora or openSUSE (SLE)
-- `Python 2.7`_
-
-.. _`Python 2.7`: http://www.python.org/
+This document assumes you are using Ubuntu, Fedora or openSUSE (SLE)
And that you have the following tools available on your system:
+- Python_ 2.7 and 3.4
- git_
- setuptools_
- pip_
@@ -48,17 +44,18 @@ And that you have the following tools available on your system:
**Reminder**: If you're successfully using a different platform, or a
different version of the above, please document your configuration here!
+.. _Python: http://www.python.org/
.. _git: http://git-scm.com/
.. _setuptools: http://pypi.python.org/pypi/setuptools
Getting the latest code
=======================
-Make a clone of the code from our `Github repository`:
+Make a clone of the code from our `Git repository`_:
.. code-block:: bash
- $ git clone https://github.com/openstack/keystone.git
+ $ git clone https://git.openstack.org/openstack/keystone.git
When that is complete, you can:
@@ -86,18 +83,19 @@ extension, PyPi) cannot satisfy. These dependencies should be installed
prior to using `pip`, and the installation method may vary depending on
your platform.
-Ubuntu 12.04:
+Ubuntu 14.04:
.. code-block:: bash
- $ sudo apt-get install python-dev libxml2-dev libxslt1-dev libsasl2-dev libsqlite3-dev libssl-dev libldap2-dev libffi-dev
+ $ sudo apt-get install python-dev python3-dev libxml2-dev libxslt1-dev \
+ libsasl2-dev libsqlite3-dev libssl-dev libldap2-dev libffi-dev
Fedora 19+:
.. code-block:: bash
- $ sudo yum install python-sqlite2 python-lxml python-greenlet-devel python-ldap sqlite-devel openldap-devel python-devel libxslt-devel openssl-devel
+ $ sudo yum install python-lxml python-greenlet-devel python-ldap sqlite-devel openldap-devel python-devel libxslt-devel openssl-devel libffi-devel
openSUSE 13.2 (SLE 12):
@@ -137,7 +135,7 @@ see virtualenv_.
.. _virtualenv: http://www.virtualenv.org/
If you want to run Keystone outside of a virtualenv, you can install the
-dependencies directly into your system from the requires files:
+dependencies directly into your system from the requirements files:
.. code-block:: bash
@@ -161,15 +159,12 @@ forget to activate it:
.. code-block:: bash
$ source .venv/bin/activate
- $ python
-You should then be able to `import keystone` from your Python shell
-without issue:
+You should then be able to `import keystone` using Python without issue:
-.. code-block:: python
+.. code-block:: bash
- >>> import keystone
- >>>
+ $ python -c "import keystone"
-If you can import Keystone successfully, you should be ready to move on to
-:doc:`developing`.
+If you can import Keystone without a traceback, you should be ready to move on
+to :doc:`developing`.
diff --git a/keystone-moon/etc/keystone-paste.ini b/keystone-moon/etc/keystone-paste.ini
index 24f167fa..70db3823 100644
--- a/keystone-moon/etc/keystone-paste.ini
+++ b/keystone-moon/etc/keystone-paste.ini
@@ -1,73 +1,67 @@
# Keystone PasteDeploy configuration file.
-[filter:moon]
-paste.filter_factory = keystone.contrib.moon.routers:Admin.factory
-
[filter:debug]
-paste.filter_factory = keystone.common.wsgi:Debug.factory
+use = egg:keystone#debug
[filter:request_id]
-paste.filter_factory = oslo_middleware:RequestId.factory
+use = egg:keystone#request_id
[filter:build_auth_context]
-paste.filter_factory = keystone.middleware:AuthContextMiddleware.factory
+use = egg:keystone#build_auth_context
[filter:token_auth]
-paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
+use = egg:keystone#token_auth
[filter:admin_token_auth]
-paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
+use = egg:keystone#admin_token_auth
[filter:json_body]
-paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
+use = egg:keystone#json_body
[filter:user_crud_extension]
-paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory
+use = egg:keystone#user_crud_extension
[filter:crud_extension]
-paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
+use = egg:keystone#crud_extension
[filter:ec2_extension]
-paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
+use = egg:keystone#ec2_extension
[filter:ec2_extension_v3]
-paste.filter_factory = keystone.contrib.ec2:Ec2ExtensionV3.factory
+use = egg:keystone#ec2_extension_v3
[filter:federation_extension]
-paste.filter_factory = keystone.contrib.federation.routers:FederationExtension.factory
+use = egg:keystone#federation_extension
[filter:oauth1_extension]
-paste.filter_factory = keystone.contrib.oauth1.routers:OAuth1Extension.factory
+use = egg:keystone#oauth1_extension
[filter:s3_extension]
-paste.filter_factory = keystone.contrib.s3:S3Extension.factory
+use = egg:keystone#s3_extension
[filter:endpoint_filter_extension]
-paste.filter_factory = keystone.contrib.endpoint_filter.routers:EndpointFilterExtension.factory
-
-[filter:endpoint_policy_extension]
-paste.filter_factory = keystone.contrib.endpoint_policy.routers:EndpointPolicyExtension.factory
+use = egg:keystone#endpoint_filter_extension
[filter:simple_cert_extension]
-paste.filter_factory = keystone.contrib.simple_cert:SimpleCertExtension.factory
+use = egg:keystone#simple_cert_extension
[filter:revoke_extension]
-paste.filter_factory = keystone.contrib.revoke.routers:RevokeExtension.factory
+use = egg:keystone#revoke_extension
[filter:url_normalize]
-paste.filter_factory = keystone.middleware:NormalizingFilter.factory
+use = egg:keystone#url_normalize
[filter:sizelimit]
-paste.filter_factory = oslo_middleware.sizelimit:RequestBodySizeLimiter.factory
+use = egg:keystone#sizelimit
[app:public_service]
-paste.app_factory = keystone.service:public_app_factory
+use = egg:keystone#public_service
[app:service_v3]
-paste.app_factory = keystone.service:v3_app_factory
+use = egg:keystone#service_v3
[app:admin_service]
-paste.app_factory = keystone.service:admin_app_factory
+use = egg:keystone#admin_service
[pipeline:public_api]
# The last item in this pipeline must be public_service or an equivalent
@@ -82,13 +76,13 @@ pipeline = sizelimit url_normalize request_id build_auth_context token_auth admi
[pipeline:api_v3]
# The last item in this pipeline must be service_v3 or an equivalent
# application. It cannot be a filter.
-pipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension simple_cert_extension revoke_extension federation_extension oauth1_extension endpoint_filter_extension endpoint_policy_extension service_v3
+pipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension simple_cert_extension revoke_extension federation_extension oauth1_extension endpoint_filter_extension service_v3
[app:public_version_service]
-paste.app_factory = keystone.service:public_version_app_factory
+use = egg:keystone#public_version_service
[app:admin_version_service]
-paste.app_factory = keystone.service:admin_version_app_factory
+use = egg:keystone#admin_version_service
[pipeline:public_version_api]
pipeline = sizelimit url_normalize public_version_service
diff --git a/keystone-moon/etc/keystone.conf.sample b/keystone-moon/etc/keystone.conf.sample
index b3c741c8..ec5a08cc 100644
--- a/keystone-moon/etc/keystone.conf.sample
+++ b/keystone-moon/etc/keystone.conf.sample
@@ -11,13 +11,6 @@
# value)
#admin_token = ADMIN
-# (Deprecated) The port which the OpenStack Compute service listens on. This
-# option was only used for string replacement in the templated catalog backend.
-# Templated catalogs should replace the "$(compute_port)s" substitution with
-# the static port of the compute service. As of Juno, this option is deprecated
-# and will be removed in the L release. (integer value)
-#compute_port = 8774
-
# The base public endpoint URL for Keystone that is advertised to clients
# (NOTE: this does NOT affect how Keystone listens for connections). Defaults
# to the base host URL of the request. E.g. a request to
@@ -57,7 +50,9 @@
# The value passed as the keyword "rounds" to passlib's encrypt method.
# (integer value)
-#crypt_strength = 40000
+# Minimum value: 1000
+# Maximum value: 100000
+#crypt_strength = 10000
# The maximum number of entities that will be returned in a collection, with no
# limit set by default. This global limit may be then overridden for a specific
@@ -93,48 +88,23 @@
# Define the notification format for Identity Service events. A "basic"
# notification has information about the resource being operated on. A "cadf"
# notification has the same information, as well as information about the
-# initiator of the event. Valid options are: basic and cadf (string value)
+# initiator of the event. (string value)
+# Allowed values: basic, cadf
#notification_format = basic
#
-# From keystone.openstack.common.eventlet_backdoor
-#
-
-# Enable eventlet backdoor. Acceptable values are 0, <port>, and
-# <start>:<end>, where 0 results in listening on a random tcp port number;
-# <port> results in listening on the specified port number (and not enabling
-# backdoor if that port is in use); and <start>:<end> results in listening on
-# the smallest unused port number within the specified range of port numbers.
-# The chosen port is displayed in the service's log file. (string value)
-#backdoor_port = <None>
-
-#
-# From keystone.openstack.common.policy
-#
-
-# The JSON file that defines policies. (string value)
-#policy_file = policy.json
-
-# Default rule. Enforced when a requested rule is not found. (string value)
-#policy_default_rule = default
-
-# Directories where policy configuration files are stored. They can be relative
-# to any directory in the search path defined by the config_dir option, or
-# absolute paths. The file defined by policy_file must exist for these
-# directories to be searched. (multi valued)
-#policy_dirs = policy.d
-
-#
# From oslo.log
#
-# Print debugging output (set logging level to DEBUG instead of default WARNING
+# Print debugging output (set logging level to DEBUG instead of default INFO
# level). (boolean value)
#debug = false
-# Print more verbose output (set logging level to INFO instead of default
-# WARNING level). (boolean value)
-#verbose = false
+# If set to false, will disable INFO logging level, making WARNING the default.
+# (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
# The name of a logging configuration file. This file is appended to any
# existing logging configuration files. For details about logging configuration
@@ -162,15 +132,17 @@
# Deprecated group/name - [DEFAULT]/logdir
#log_dir = <None>
-# Use syslog for logging. Existing syslog format is DEPRECATED during I, and
-# will change in J to honor RFC5424. (boolean value)
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. (boolean value)
#use_syslog = false
# (Optional) Enables or disables syslog rfc5424 format for logging. If enabled,
# prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The
-# format without the APP-NAME is deprecated in I, and will be removed in J.
-# (boolean value)
-#use_syslog_rfc_format = false
+# format without the APP-NAME is deprecated in Kilo, and will be removed in
+# Mitaka, along with this option. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#use_syslog_rfc_format = true
# Syslog facility to receive log lines. (string value)
#syslog_log_facility = LOG_USER
@@ -188,17 +160,14 @@
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format. (string value)
-#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
# List of logger=LEVEL pairs. (list value)
-#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
-# Enables or disables fatal status of deprecations. (boolean value)
-#fatal_deprecations = false
-
# The format for an instance that is passed with the log message. (string
# value)
#instance_format = "[instance: %(uuid)s] "
@@ -207,16 +176,23 @@
# value)
#instance_uuid_format = "[instance: %(uuid)s] "
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
#
# From oslo.messaging
#
+# Size of RPC connection pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
+#rpc_conn_pool_size = 30
+
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address. (string value)
#rpc_zmq_bind_address = *
# MatchMaker driver. (string value)
-#rpc_zmq_matchmaker = oslo_messaging._drivers.matchmaker.MatchMakerLocalhost
+#rpc_zmq_matchmaker = local
# ZeroMQ receiver listening port. (integer value)
#rpc_zmq_port = 9501
@@ -245,10 +221,12 @@
# Heartbeat time-to-live. (integer value)
#matchmaker_heartbeat_ttl = 600
-# Size of RPC thread pool. (integer value)
-#rpc_thread_pool_size = 64
+# Size of executor thread pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
+#executor_thread_pool_size = 64
-# Driver or drivers to handle sending notifications. (multi valued)
+# The driver(s) to handle sending notifications. Possible values are
+# messaging, messagingv2, routing, log, test, noop (multi valued)
#notification_driver =
# AMQP topic used for OpenStack notifications. (list value)
@@ -271,6 +249,22 @@
# exchange name specified in the transport_url option. (string value)
#control_exchange = keystone
+#
+# From oslo.service.service
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>, and
+# <start>:<end>, where 0 results in listening on a random tcp port number;
+# <port> results in listening on the specified port number (and not enabling
+# backdoor if that port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range of port numbers.
+# The chosen port is displayed in the service's log file. (string value)
+#backdoor_port = <None>
+
+# Enables or disables logging values of all registered options when starting a
+# service (at DEBUG level). (boolean value)
+#log_options = true
+
[assignment]
@@ -278,7 +272,10 @@
# From keystone
#
-# Assignment backend driver. (string value)
+# Entrypoint for the assignment backend driver in the keystone.assignment
+# namespace. Supplied drivers are ldap and sql. If an assignment driver is not
+# specified, the identity driver will choose the assignment driver. (string
+# value)
#driver = <None>
@@ -288,17 +285,25 @@
# From keystone
#
-# Default auth methods. (list value)
-#methods = external,password,token
+# Allowed authentication methods. (list value)
+#methods = external,password,token,oauth1
+
+# Entrypoint for the password auth plugin module in the keystone.auth.password
+# namespace. (string value)
+#password = <None>
-# The password auth plugin module. (string value)
-#password = keystone.auth.plugins.password.Password
+# Entrypoint for the token auth plugin module in the keystone.auth.token
+# namespace. (string value)
+#token = <None>
-# The token auth plugin module. (string value)
-#token = keystone.auth.plugins.token.Token
+# Entrypoint for the external (REMOTE_USER) auth plugin module in the
+# keystone.auth.external namespace. Supplied drivers are DefaultDomain and
+# Domain. The default driver is DefaultDomain. (string value)
+#external = <None>
-# The external (REMOTE_USER) auth plugin module. (string value)
-#external = keystone.auth.plugins.external.DefaultDomain
+# Entrypoint for the OAuth 1.0 auth plugin module in the keystone.auth.oauth1
+# namespace. (string value)
+#oauth1 = <None>
[cache]
@@ -379,8 +384,10 @@
# value)
#template_file = default_catalog.templates
-# Catalog backend driver. (string value)
-#driver = keystone.catalog.backends.sql.Catalog
+# Entrypoint for the catalog backend driver in the keystone.catalog namespace.
+# Supplied drivers are kvs, sql, templated, and endpoint_filter.sql (string
+# value)
+#driver = sql
# Toggle for catalog caching. This has no effect unless global caching is
# enabled. (boolean value)
@@ -395,14 +402,71 @@
#list_limit = <None>
+[cors]
+
+#
+# From oslo.middleware
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# request's "origin" header. (string value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list value)
+#allow_methods = GET,POST,PUT,DELETE,OPTIONS
+
+# Indicate which header field names may be used during the actual request.
+# (list value)
+#allow_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
+
+
+[cors.subdomain]
+
+#
+# From oslo.middleware
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# request's "origin" header. (string value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list value)
+#allow_methods = GET,POST,PUT,DELETE,OPTIONS
+
+# Indicate which header field names may be used during the actual request.
+# (list value)
+#allow_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
+
+
[credential]
#
# From keystone
#
-# Credential backend driver. (string value)
-#driver = keystone.credential.backends.sql.Credential
+# Entrypoint for the credential backend driver in the keystone.credential
+# namespace. (string value)
+#driver = sql
[database]
@@ -505,14 +569,34 @@
#db_max_retries = 20
+[domain_config]
+
+#
+# From keystone
+#
+
+# Entrypoint for the domain config backend driver in the
+# keystone.resource.domain_config namespace. (string value)
+#driver = sql
+
+# Toggle for domain config caching. This has no effect unless global caching is
+# enabled. (boolean value)
+#caching = true
+
+# TTL (in seconds) to cache domain config data. This has no effect unless
+# domain config caching is enabled. (integer value)
+#cache_time = 300
+
+
[endpoint_filter]
#
# From keystone
#
-# Endpoint Filter backend driver (string value)
-#driver = keystone.contrib.endpoint_filter.backends.sql.EndpointFilter
+# Entrypoint for the endpoint filter backend driver in the
+# keystone.endpoint_filter namespace. (string value)
+#driver = sql
# Toggle to return all active endpoints if no filter exists. (boolean value)
#return_all_endpoints_if_no_filter = true
@@ -524,8 +608,12 @@
# From keystone
#
-# Endpoint policy backend driver (string value)
-#driver = keystone.contrib.endpoint_policy.backends.sql.EndpointPolicy
+# Enable endpoint_policy functionality. (boolean value)
+#enabled = true
+
+# Entrypoint for the endpoint policy backend driver in the
+# keystone.endpoint_policy namespace. (string value)
+#driver = sql
[eventlet_server]
@@ -537,42 +625,71 @@
# The number of worker processes to serve the public eventlet application.
# Defaults to number of CPUs (minimum of 2). (integer value)
# Deprecated group/name - [DEFAULT]/public_workers
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#public_workers = <None>
# The number of worker processes to serve the admin eventlet application.
# Defaults to number of CPUs (minimum of 2). (integer value)
# Deprecated group/name - [DEFAULT]/admin_workers
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#admin_workers = <None>
# The IP address of the network interface for the public service to listen on.
# (string value)
# Deprecated group/name - [DEFAULT]/bind_host
# Deprecated group/name - [DEFAULT]/public_bind_host
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#public_bind_host = 0.0.0.0
# The port number which the public service listens on. (integer value)
+# Minimum value: 1
+# Maximum value: 65535
# Deprecated group/name - [DEFAULT]/public_port
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#public_port = 5000
# The IP address of the network interface for the admin service to listen on.
# (string value)
# Deprecated group/name - [DEFAULT]/bind_host
# Deprecated group/name - [DEFAULT]/admin_bind_host
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#admin_bind_host = 0.0.0.0
# The port number which the admin service listens on. (integer value)
+# Minimum value: 1
+# Maximum value: 65535
# Deprecated group/name - [DEFAULT]/admin_port
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#admin_port = 35357
+# If set to false, disables keepalives on the server; all connections will be
+# closed after serving one request. (boolean value)
+#wsgi_keep_alive = true
+
+# Timeout for socket operations on a client connection. If an incoming
+# connection is idle for this number of seconds it will be closed. A value of
+# '0' means wait forever. (integer value)
+#client_socket_timeout = 900
+
# Set this to true if you want to enable TCP_KEEPALIVE on server sockets, i.e.
# sockets used by the Keystone wsgi server for client connections. (boolean
# value)
# Deprecated group/name - [DEFAULT]/tcp_keepalive
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#tcp_keepalive = false
# Sets the value of TCP_KEEPIDLE in seconds for each server socket. Only
# applies if tcp_keepalive is true. (integer value)
# Deprecated group/name - [DEFAULT]/tcp_keepidle
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#tcp_keepidle = 600
@@ -584,24 +701,34 @@
# Toggle for SSL support on the Keystone eventlet servers. (boolean value)
# Deprecated group/name - [ssl]/enable
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#enable = false
# Path of the certfile for SSL. For non-production environments, you may be
# interested in using `keystone-manage ssl_setup` to generate self-signed
# certificates. (string value)
# Deprecated group/name - [ssl]/certfile
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#certfile = /etc/keystone/ssl/certs/keystone.pem
# Path of the keyfile for SSL. (string value)
# Deprecated group/name - [ssl]/keyfile
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#keyfile = /etc/keystone/ssl/private/keystonekey.pem
# Path of the CA cert file for SSL. (string value)
# Deprecated group/name - [ssl]/ca_certs
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#ca_certs = /etc/keystone/ssl/certs/ca.pem
# Require client certificate. (boolean value)
# Deprecated group/name - [ssl]/cert_required
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#cert_required = false
@@ -611,8 +738,9 @@
# From keystone
#
-# Federation backend driver. (string value)
-#driver = keystone.contrib.federation.backends.sql.Federation
+# Entrypoint for the federation backend driver in the keystone.federation
+# namespace. (string value)
+#driver = sql
# Value to be used when filtering assertion parameters from the environment.
# (string value)
@@ -626,9 +754,7 @@
# A domain name that is reserved to allow federated ephemeral users to have a
# domain concept. Note that an admin will not be able to create a domain with
# this name or update an existing domain to this name. You are not advised to
-# change this value unless you really have to. Changing this option to empty
-# string or None will not have any impact and default name will be used.
-# (string value)
+# change this value unless you really have to. (string value)
#federated_domain_name = Federated
# A list of trusted dashboard hosts. Before accepting a Single Sign-On request
@@ -685,16 +811,17 @@
# Extract the domain specific configuration options from the resource backend
# where they have been stored with the domain data. This feature is disabled by
# default (in which case the domain specific options will be loaded from files
-# in the domain configuration directory); set to true to enable. This feature
-# is not yet supported. (boolean value)
+# in the domain configuration directory); set to true to enable. (boolean
+# value)
#domain_configurations_from_database = false
# Path for Keystone to locate the domain specific identity configuration files
# if domain_specific_drivers_enabled is set to true. (string value)
#domain_config_dir = /etc/keystone/domains
-# Identity backend driver. (string value)
-#driver = keystone.identity.backends.sql.Identity
+# Entrypoint for the identity backend driver in the keystone.identity
+# namespace. Supplied drivers are ldap and sql. (string value)
+#driver = sql
# Toggle for identity caching. This has no effect unless global caching is
# enabled. (boolean value)
@@ -706,6 +833,7 @@
# Maximum supported length for user passwords; decrease to improve performance.
# (integer value)
+# Maximum value: 4096
#max_password_length = 4096
# Maximum number of entities that will be returned in an identity collection.
@@ -719,13 +847,14 @@
# From keystone
#
-# Keystone Identity Mapping backend driver. (string value)
-#driver = keystone.identity.mapping_backends.sql.Mapping
+# Entrypoint for the identity mapping backend driver in the
+# keystone.identity.id_mapping namespace. (string value)
+#driver = sql
-# Public ID generator for user and group entities. The Keystone identity mapper
-# only supports generators that produce no more than 64 characters. (string
-# value)
-#generator = keystone.identity.id_generators.sha256.Generator
+# Entrypoint for the public ID generator for user and group entities in the
+# keystone.identity.id_generator namespace. The Keystone identity mapper only
+# supports generators that produce no more than 64 characters. (string value)
+#generator = sha256
# The format of user and group IDs changed in Juno for backends that do not
# generate UUIDs (e.g. LDAP), with keystone providing a hash mapping to the
@@ -763,7 +892,7 @@
# always leave this set to true. (boolean value)
#enable_key_mangler = true
-# Default lock timeout for distributed locking. (integer value)
+# Default lock timeout (in seconds) for distributed locking. (integer value)
#default_lock_timeout = 5
@@ -797,18 +926,18 @@
# your LDAP server supports subtree deletion. (boolean value)
#allow_subtree_delete = false
-# The LDAP scope for queries, this can be either "one" (onelevel/singleLevel)
-# or "sub" (subtree/wholeSubtree). (string value)
+# The LDAP scope for queries, "one" represents oneLevel/singleLevel and "sub"
+# represents subtree/wholeSubtree options. (string value)
+# Allowed values: one, sub
#query_scope = one
# Maximum results per page; a value of zero ("0") disables paging. (integer
# value)
#page_size = 0
-# The LDAP dereferencing option for queries. This can be either "never",
-# "searching", "always", "finding" or "default". The "default" option falls
-# back to using default dereferencing configured by your ldap.conf. (string
-# value)
+# The LDAP dereferencing option for queries. The "default" option falls back to
+# using default dereferencing configured by your ldap.conf. (string value)
+# Allowed values: never, searching, always, finding, default
#alias_dereferencing = default
# Sets the LDAP debugging level for LDAP calls. A value of 0 means that
@@ -820,7 +949,7 @@
# value)
#chase_referrals = <None>
-# Search base for users. (string value)
+# Search base for users. Defaults to the suffix value. (string value)
#user_tree_dn = <None>
# LDAP search filter for users. (string value)
@@ -867,7 +996,7 @@
#user_enabled_default = True
# List of attributes stripped off the user on update. (list value)
-#user_attribute_ignore = default_project_id,tenants
+#user_attribute_ignore = default_project_id
# LDAP attribute mapped to default_project_id for users. (string value)
#user_default_project_id_attribute = <None>
@@ -896,111 +1025,165 @@
# Identity API attribute. (list value)
#user_additional_attribute_mapping =
-# Search base for projects (string value)
+# Search base for projects. Defaults to the suffix value. (string value)
# Deprecated group/name - [ldap]/tenant_tree_dn
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#project_tree_dn = <None>
# LDAP search filter for projects. (string value)
# Deprecated group/name - [ldap]/tenant_filter
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#project_filter = <None>
# LDAP objectclass for projects. (string value)
# Deprecated group/name - [ldap]/tenant_objectclass
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#project_objectclass = groupOfNames
# LDAP attribute mapped to project id. (string value)
# Deprecated group/name - [ldap]/tenant_id_attribute
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#project_id_attribute = cn
# LDAP attribute mapped to project membership for user. (string value)
# Deprecated group/name - [ldap]/tenant_member_attribute
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#project_member_attribute = member
# LDAP attribute mapped to project name. (string value)
# Deprecated group/name - [ldap]/tenant_name_attribute
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#project_name_attribute = ou
# LDAP attribute mapped to project description. (string value)
# Deprecated group/name - [ldap]/tenant_desc_attribute
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#project_desc_attribute = description
# LDAP attribute mapped to project enabled. (string value)
# Deprecated group/name - [ldap]/tenant_enabled_attribute
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#project_enabled_attribute = enabled
# LDAP attribute mapped to project domain_id. (string value)
# Deprecated group/name - [ldap]/tenant_domain_id_attribute
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#project_domain_id_attribute = businessCategory
# List of attributes stripped off the project on update. (list value)
# Deprecated group/name - [ldap]/tenant_attribute_ignore
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#project_attribute_ignore =
# Allow project creation in LDAP backend. (boolean value)
# Deprecated group/name - [ldap]/tenant_allow_create
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#project_allow_create = true
# Allow project update in LDAP backend. (boolean value)
# Deprecated group/name - [ldap]/tenant_allow_update
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#project_allow_update = true
# Allow project deletion in LDAP backend. (boolean value)
# Deprecated group/name - [ldap]/tenant_allow_delete
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#project_allow_delete = true
# If true, Keystone uses an alternative method to determine if a project is
# enabled or not by checking if they are a member of the
# "project_enabled_emulation_dn" group. (boolean value)
# Deprecated group/name - [ldap]/tenant_enabled_emulation
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#project_enabled_emulation = false
# DN of the group entry to hold enabled projects when using enabled emulation.
# (string value)
# Deprecated group/name - [ldap]/tenant_enabled_emulation_dn
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#project_enabled_emulation_dn = <None>
# Additional attribute mappings for projects. Attribute mapping format is
# <ldap_attr>:<user_attr>, where ldap_attr is the attribute in the LDAP entry
# and user_attr is the Identity API attribute. (list value)
# Deprecated group/name - [ldap]/tenant_additional_attribute_mapping
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#project_additional_attribute_mapping =
-# Search base for roles. (string value)
+# Search base for roles. Defaults to the suffix value. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#role_tree_dn = <None>
# LDAP search filter for roles. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#role_filter = <None>
# LDAP objectclass for roles. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#role_objectclass = organizationalRole
# LDAP attribute mapped to role id. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#role_id_attribute = cn
# LDAP attribute mapped to role name. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#role_name_attribute = ou
# LDAP attribute mapped to role membership. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#role_member_attribute = roleOccupant
# List of attributes stripped off the role on update. (list value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#role_attribute_ignore =
# Allow role creation in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#role_allow_create = true
# Allow role update in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#role_allow_update = true
# Allow role deletion in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#role_allow_delete = true
# Additional attribute mappings for roles. Attribute mapping format is
# <ldap_attr>:<user_attr>, where ldap_attr is the attribute in the LDAP entry
# and user_attr is the Identity API attribute. (list value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#role_additional_attribute_mapping =
-# Search base for groups. (string value)
+# Search base for groups. Defaults to the suffix value. (string value)
#group_tree_dn = <None>
# LDAP search filter for groups. (string value)
@@ -1048,7 +1231,9 @@
# Enable TLS for communicating with LDAP servers. (boolean value)
#use_tls = false
-# Valid options for tls_req_cert are demand, never, and allow. (string value)
+# Specifies what checks to perform on client certificates in an incoming TLS
+# session. (string value)
+# Allowed values: demand, never, allow
#tls_req_cert = demand
# Enable LDAP connection pooling. (boolean value)
@@ -1151,8 +1336,9 @@
# From keystone
#
-# Credential backend driver. (string value)
-#driver = keystone.contrib.oauth1.backends.sql.OAuth1
+# Entrypoint for the OAuth backend driver in the keystone.oauth1 namespace.
+# (string value)
+#driver = sql
# Duration (in seconds) for the OAuth Request Token. (integer value)
#request_token_duration = 28800
@@ -1202,7 +1388,7 @@
# Deprecated group/name - [amqp1]/trace
#trace = false
-# CA certificate PEM file for verifing server certificate (string value)
+# CA certificate PEM file to verify server certificate (string value)
# Deprecated group/name - [amqp1]/ssl_ca_file
#ssl_ca_file =
@@ -1230,6 +1416,7 @@
#
# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_durable_queues
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
#amqp_durable_queues = false
@@ -1237,9 +1424,15 @@
# Deprecated group/name - [DEFAULT]/amqp_auto_delete
#amqp_auto_delete = false
-# Size of RPC connection pool. (integer value)
-# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
-#rpc_conn_pool_size = 30
+# Send a single AMQP reply to call message. The current behaviour since oslo-
+# incubator is to send two AMQP replies - first one with the payload, a second
+# one to ensure the other side has finished receiving the payload. We are
+# going to remove it in the N release, but we must keep backward compatibility
+# at the same time. This option provides such compatibility - it defaults to
+# False in Liberty and can be turned on for early adopters with new
+# installations or for testing. Please note that this option will be removed
+# in the Mitaka release. (boolean value)
+#send_single_reply = false
# Qpid broker hostname. (string value)
# Deprecated group/name - [DEFAULT]/qpid_hostname
@@ -1296,6 +1489,7 @@
#
# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_durable_queues
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
#amqp_durable_queues = false
@@ -1303,9 +1497,15 @@
# Deprecated group/name - [DEFAULT]/amqp_auto_delete
#amqp_auto_delete = false
-# Size of RPC connection pool. (integer value)
-# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
-#rpc_conn_pool_size = 30
+# Send a single AMQP reply to call message. The current behaviour since oslo-
+# incubator is to send two AMQP replies - first one with the payload, a second
+# one to ensure the other side has finished receiving the payload. We are
+# going to remove it in the N release, but we must keep backward compatibility
+# at the same time. This option provides such compatibility - it defaults to
+# False in Liberty and can be turned on for early adopters with new
+# installations or for testing. Please note that this option will be removed
+# in the Mitaka release. (boolean value)
+#send_single_reply = false
# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
@@ -1330,6 +1530,10 @@
# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
#kombu_reconnect_delay = 1.0
+# How long to wait before considering a reconnect attempt to have failed. This
+# value should not be longer than rpc_response_timeout. (integer value)
+#kombu_reconnect_timeout = 60
+
# The RabbitMQ broker address where a single node is used. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_host
#rabbit_host = localhost
@@ -1380,6 +1584,15 @@
# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
#rabbit_ha_queues = false
+# Number of seconds after which the Rabbit broker is considered down if
+# heartbeat's keep-alive fails (0 disables the heartbeat). EXPERIMENTAL
+# (integer value)
+#heartbeat_timeout_threshold = 60
+
+# How often, during the heartbeat_timeout_threshold, the heartbeat is
+# checked. (integer value)
+#heartbeat_rate = 2
+
# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
# Deprecated group/name - [DEFAULT]/fake_rabbit
#fake_rabbit = false
@@ -1396,6 +1609,40 @@
# Deprecated group/name - [DEFAULT]/max_request_body_size
#max_request_body_size = 114688
+#
+# From oslo.middleware
+#
+
+# The HTTP Header that will be used to determine what the original request
+# protocol scheme was, even if it was hidden by an SSL termination proxy.
+# (string value)
+#secure_proxy_ssl_header = X-Forwarded-Proto
+
+
+[oslo_policy]
+
+#
+# From oslo.policy
+#
+
+# The JSON file that defines policies. (string value)
+# Deprecated group/name - [DEFAULT]/policy_file
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string value)
+# Deprecated group/name - [DEFAULT]/policy_default_rule
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be relative
+# to any directory in the search path defined by the config_dir option, or
+# absolute paths. The file defined by policy_file must exist for these
+# directories to be searched. Missing or empty directories are ignored. (multi
+# valued)
+# Deprecated group/name - [DEFAULT]/policy_dirs
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#policy_dirs = policy.d
+
[paste_deploy]
@@ -1414,8 +1661,9 @@
# From keystone
#
-# Policy backend driver. (string value)
-#driver = keystone.policy.backends.sql.Policy
+# Entrypoint for the policy backend driver in the keystone.policy namespace.
+# Supplied drivers are rules and sql. (string value)
+#driver = sql
# Maximum number of entities that will be returned in a policy collection.
# (integer value)
@@ -1428,8 +1676,10 @@
# From keystone
#
-# Resource backend driver. If a resource driver is not specified, the
-# assignment driver will choose the resource driver. (string value)
+# Entrypoint for the resource backend driver in the keystone.resource
+# namespace. Supplied drivers are ldap and sql. If a resource driver is not
+# specified, the assignment driver will choose the resource driver. (string
+# value)
#driver = <None>
# Toggle for resource caching. This has no effect unless global caching is
@@ -1454,9 +1704,10 @@
# From keystone
#
-# An implementation of the backend for persisting revocation events. (string
-# value)
-#driver = keystone.contrib.revoke.backends.sql.Revoke
+# Entrypoint for an implementation of the backend for persisting revocation
+# events in the keystone.revoke namespace. Supplied drivers are kvs and sql.
+# (string value)
+#driver = sql
# This value (calculated in seconds) is added to token expiration before a
# revocation event may be removed from the backend. (integer value)
@@ -1466,6 +1717,12 @@
# is enabled. (boolean value)
#caching = true
+# Time to cache the revocation list and the revocation events (in seconds).
+# This has no effect unless global and token caching are enabled. (integer
+# value)
+# Deprecated group/name - [token]/revocation_cache_time
+#cache_time = 3600
+
[role]
@@ -1473,7 +1730,8 @@
# From keystone
#
-# Role backend driver. (string value)
+# Entrypoint for the role backend driver in the keystone.role namespace.
+# Supplied drivers are ldap and sql. (string value)
#driver = <None>
# Toggle for role caching. This has no effect unless global caching is enabled.
@@ -1551,14 +1809,19 @@
# Telephone number of contact person. (string value)
#idp_contact_telephone = <None>
-# Contact type. Allowed values are: technical, support, administrative billing,
-# and other (string value)
+# The contact type describing the main point of contact for the identity
+# provider. (string value)
+# Allowed values: technical, support, administrative, billing, other
#idp_contact_type = other
# Path to the Identity Provider Metadata file. This file should be generated
# with the keystone-manage saml_idp_metadata command. (string value)
#idp_metadata_path = /etc/keystone/saml2_idp_metadata.xml
+# The prefix to use for the RelayState SAML attribute, used when generating ECP
+# wrapped assertions. (string value)
+#relay_state_prefix = ss:mem:
+
[signing]
@@ -1582,6 +1845,7 @@
# Key size (in bits) for token signing cert (auto generated certificate).
# (integer value)
+# Minimum value: 1024
#key_size = 2048
# Days the token signing cert is valid for (auto generated certificate).
@@ -1603,6 +1867,7 @@
#ca_key = /etc/keystone/ssl/private/cakey.pem
# SSL key length (in bits) (auto generated certificate). (integer value)
+# Minimum value: 1024
#key_size = 1024
# Days the certificate is valid for once signed (auto generated certificate).
@@ -1632,23 +1897,20 @@
# Amount of time a token should remain valid (in seconds). (integer value)
#expiration = 3600
-# Controls the token construction, validation, and revocation operations. Core
-# providers are "keystone.token.providers.[fernet|pkiz|pki|uuid].Provider". The
-# default provider is uuid. (string value)
-#provider = keystone.token.providers.uuid.Provider
+# Controls the token construction, validation, and revocation operations.
+# Entrypoint in the keystone.token.provider namespace. Core providers are
+# [fernet|pkiz|pki|uuid]. (string value)
+#provider = uuid
-# Token persistence backend driver. (string value)
-#driver = keystone.token.persistence.backends.sql.Token
+# Entrypoint for the token persistence backend driver in the
+# keystone.token.persistence namespace. Supplied drivers are kvs, memcache,
+# memcache_pool, and sql. (string value)
+#driver = sql
# Toggle for token system caching. This has no effect unless global caching is
# enabled. (boolean value)
#caching = true
-# Time to cache the revocation list and the revocation events if revoke
-# extension is enabled (in seconds). This has no effect unless global and token
-# caching are enabled. (integer value)
-#revocation_cache_time = 3600
-
# Time to cache tokens (in seconds). This has no effect unless global and token
# caching are enabled. (integer value)
#cache_time = <None>
@@ -1688,29 +1950,6 @@
# Maximum depth of trust redelegation. (integer value)
#max_redelegation_count = 3
-# Trust backend driver. (string value)
-#driver = keystone.trust.backends.sql.Trust
-
-
-[moon]
-
-# Authorisation backend driver. (string value)
-#authz_driver = keystone.contrib.moon.backends.flat.SuperExtensionConnector
-
-# Moon Log driver. (string value)
-#log_driver = keystone.contrib.moon.backends.flat.LogConnector
-
-# SuperExtension backend driver. (string value)
-#superextension_driver = keystone.contrib.moon.backends.flat.SuperExtensionConnector
-
-# IntraExtension backend driver. (string value)
-#intraextension_driver = keystone.contrib.moon.backends.sql.IntraExtensionConnector
-
-# Tenant backend driver. (string value)
-#tenant_driver = keystone.contrib.moon.backends.sql.TenantConnector
-
-# Local directory where all policies are stored. (string value)
-#policy_directory = /etc/keystone/policies
-
-# Local directory where SuperExtension configuration is stored. (string value)
-#super_extension_directory = /etc/keystone/super_extension
+# Entrypoint for the trust backend driver in the keystone.trust namespace.
+# (string value)
+#driver = sql
diff --git a/keystone-moon/etc/policy.json b/keystone-moon/etc/policy.json
index f0a081d3..ebb94b02 100644
--- a/keystone-moon/etc/policy.json
+++ b/keystone-moon/etc/policy.json
@@ -4,6 +4,9 @@
"service_or_admin": "rule:admin_required or rule:service_role",
"owner" : "user_id:%(user_id)s",
"admin_or_owner": "rule:admin_required or rule:owner",
+ "token_subject": "user_id:%(target.token.user_id)s",
+ "admin_or_token_subject": "rule:admin_required or rule:token_subject",
+ "service_admin_or_token_subject": "rule:service_or_admin or rule:token_subject",
"default": "rule:admin_required",
@@ -86,14 +89,13 @@
"identity:update_policy": "rule:admin_required",
"identity:delete_policy": "rule:admin_required",
- "identity:check_token": "rule:admin_required",
- "identity:validate_token": "rule:service_or_admin",
+ "identity:check_token": "rule:admin_or_token_subject",
+ "identity:validate_token": "rule:service_admin_or_token_subject",
"identity:validate_token_head": "rule:service_or_admin",
"identity:revocation_list": "rule:service_or_admin",
- "identity:revoke_token": "rule:admin_or_owner",
+ "identity:revoke_token": "rule:admin_or_token_subject",
"identity:create_trust": "user_id:%(trust.trustor_user_id)s",
- "identity:get_trust": "rule:admin_or_owner",
"identity:list_trusts": "",
"identity:list_roles_for_trust": "",
"identity:get_role_for_trust": "",
@@ -126,6 +128,7 @@
"identity:list_projects_associated_with_endpoint_group": "rule:admin_required",
"identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required",
"identity:get_endpoint_group_in_project": "rule:admin_required",
+ "identity:list_endpoint_groups_for_project": "rule:admin_required",
"identity:add_endpoint_group_to_project": "rule:admin_required",
"identity:remove_endpoint_group_from_project": "rule:admin_required",
diff --git a/keystone-moon/etc/policy.v3cloudsample.json b/keystone-moon/etc/policy.v3cloudsample.json
index a15b33f2..a96996c6 100644
--- a/keystone-moon/etc/policy.v3cloudsample.json
+++ b/keystone-moon/etc/policy.v3cloudsample.json
@@ -7,6 +7,7 @@
"admin_or_owner": "(rule:admin_required and domain_id:%(target.token.user.domain.id)s) or rule:owner",
"admin_or_cloud_admin": "rule:admin_required or rule:cloud_admin",
"admin_and_matching_domain_id": "rule:admin_required and domain_id:%(domain_id)s",
+ "service_admin_or_owner": "rule:service_or_admin or rule:owner",
"default": "rule:admin_required",
@@ -28,7 +29,7 @@
"identity:update_endpoint": "rule:cloud_admin",
"identity:delete_endpoint": "rule:cloud_admin",
- "identity:get_domain": "rule:cloud_admin",
+ "identity:get_domain": "rule:cloud_admin or rule:admin_and_matching_domain_id",
"identity:list_domains": "rule:cloud_admin",
"identity:create_domain": "rule:cloud_admin",
"identity:update_domain": "rule:cloud_admin",
@@ -88,9 +89,9 @@
"identity:create_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
"identity:revoke_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
- "admin_on_domain_filter" : "rule:cloud_admin or (rule:admin_required and domain_id:%(scope.domain.id)s)",
- "admin_on_project_filter" : "rule:cloud_admin or (rule:admin_required and project_id:%(scope.project.id)s)",
- "identity:list_role_assignments": "rule:admin_on_domain_filter or rule:admin_on_project_filter",
+ "admin_on_domain_filter" : "rule:admin_required and domain_id:%(scope.domain.id)s",
+ "admin_on_project_filter" : "rule:admin_required and project_id:%(scope.project.id)s",
+ "identity:list_role_assignments": "rule:cloud_admin or rule:admin_on_domain_filter or rule:admin_on_project_filter",
"identity:get_policy": "rule:cloud_admin",
"identity:list_policies": "rule:cloud_admin",
@@ -100,13 +101,12 @@
"identity:change_password": "rule:owner",
"identity:check_token": "rule:admin_or_owner",
- "identity:validate_token": "rule:service_or_admin",
+ "identity:validate_token": "rule:service_admin_or_owner",
"identity:validate_token_head": "rule:service_or_admin",
"identity:revocation_list": "rule:service_or_admin",
"identity:revoke_token": "rule:admin_or_owner",
"identity:create_trust": "user_id:%(trust.trustor_user_id)s",
- "identity:get_trust": "rule:admin_or_owner",
"identity:list_trusts": "",
"identity:list_roles_for_trust": "",
"identity:get_role_for_trust": "",
@@ -139,6 +139,7 @@
"identity:list_projects_associated_with_endpoint_group": "rule:admin_required",
"identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required",
"identity:get_endpoint_group_in_project": "rule:admin_required",
+ "identity:list_endpoint_groups_for_project": "rule:admin_required",
"identity:add_endpoint_group_to_project": "rule:admin_required",
"identity:remove_endpoint_group_from_project": "rule:admin_required",
diff --git a/keystone-moon/httpd/README b/keystone-moon/httpd/README
index c4f5a800..35a32fc0 100644
--- a/keystone-moon/httpd/README
+++ b/keystone-moon/httpd/README
@@ -1,2 +1,2 @@
-Documentation how to set up Keystone to run with Apache HTTPD is in
+Documentation for running Keystone with Apache HTTPD is in
doc/source/apache-httpd.rst
diff --git a/keystone-moon/httpd/wsgi-keystone.conf b/keystone-moon/httpd/wsgi-keystone.conf
index f191818f..c2224d42 100644
--- a/keystone-moon/httpd/wsgi-keystone.conf
+++ b/keystone-moon/httpd/wsgi-keystone.conf
@@ -2,9 +2,9 @@ Listen 5000
Listen 35357
<VirtualHost *:5000>
- WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone display-name=%{GROUP}
+ WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-public
- WSGIScriptAlias / /var/www/cgi-bin/keystone/main
+ WSGIScriptAlias / /usr/local/bin/keystone-wsgi-public
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
<IfVersion >= 2.4>
@@ -12,12 +12,22 @@ Listen 35357
</IfVersion>
ErrorLog /var/log/apache2/keystone.log
CustomLog /var/log/apache2/keystone_access.log combined
+
+ <Directory /usr/local/bin>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
</VirtualHost>
<VirtualHost *:35357>
- WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone display-name=%{GROUP}
+ WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-admin
- WSGIScriptAlias / /var/www/cgi-bin/keystone/admin
+ WSGIScriptAlias / /usr/local/bin/keystone-wsgi-admin
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
<IfVersion >= 2.4>
@@ -25,4 +35,14 @@ Listen 35357
</IfVersion>
ErrorLog /var/log/apache2/keystone.log
CustomLog /var/log/apache2/keystone_access.log combined
+
+ <Directory /usr/local/bin>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
</VirtualHost>
diff --git a/keystone-moon/keystone/assignment/backends/ldap.py b/keystone-moon/keystone/assignment/backends/ldap.py
index f93e989f..4ca66c4d 100644
--- a/keystone-moon/keystone/assignment/backends/ldap.py
+++ b/keystone-moon/keystone/assignment/backends/ldap.py
@@ -13,10 +13,10 @@
# under the License.
from __future__ import absolute_import
-import ldap as ldap
import ldap.filter
from oslo_config import cfg
from oslo_log import log
+from oslo_log import versionutils
from keystone import assignment
from keystone.assignment.role_backends import ldap as ldap_role
@@ -25,7 +25,6 @@ from keystone.common import models
from keystone import exception
from keystone.i18n import _
from keystone.identity.backends import ldap as ldap_identity
-from keystone.openstack.common import versionutils
CONF = cfg.CONF
@@ -36,7 +35,7 @@ class Assignment(assignment.Driver):
@versionutils.deprecated(
versionutils.deprecated.KILO,
remove_in=+2,
- what='keystone.assignment.backends.ldap.Assignment')
+ what='ldap')
def __init__(self):
super(Assignment, self).__init__()
self.LDAP_URL = CONF.ldap.url
@@ -54,10 +53,10 @@ class Assignment(assignment.Driver):
self.role = RoleApi(CONF, self.user)
def default_role_driver(self):
- return 'keystone.assignment.role_backends.ldap.Role'
+ return 'ldap'
def default_resource_driver(self):
- return 'keystone.resource.backends.ldap.Resource'
+ return 'ldap'
def list_role_ids_for_groups_on_project(
self, groups, project_id, project_domain_id, project_parents):
@@ -181,7 +180,7 @@ class Assignment(assignment.Driver):
self.group._id_to_dn(group_id), role_id)
# Bulk actions on User From identity
- def delete_user(self, user_id):
+ def delete_user_assignments(self, user_id):
user_dn = self.user._id_to_dn(user_id)
for ref in self.role.list_global_roles_for_user(user_dn):
self.role.delete_user(ref.role_dn, ref.user_dn,
@@ -191,7 +190,7 @@ class Assignment(assignment.Driver):
self.role.delete_user(ref.role_dn, ref.user_dn,
self.role._dn_to_id(ref.role_dn))
- def delete_group(self, group_id):
+ def delete_group_assignments(self, group_id):
"""Called when the group was deleted.
Any role assignments for the group should be cleaned up.
@@ -277,20 +276,39 @@ class Assignment(assignment.Driver):
return self._roles_from_role_dicts(metadata_ref.get('roles', []),
inherited_to_projects)
- def list_role_assignments(self):
+ def list_role_assignments(self, role_id=None,
+ user_id=None, group_ids=None,
+ domain_id=None, project_ids=None,
+ inherited_to_projects=None):
role_assignments = []
- for a in self.role.list_role_assignments(self.project.tree_dn):
- if isinstance(a, UserRoleAssociation):
- assignment = {
- 'role_id': self.role._dn_to_id(a.role_dn),
- 'user_id': self.user._dn_to_id(a.user_dn),
- 'project_id': self.project._dn_to_id(a.project_dn)}
- else:
- assignment = {
- 'role_id': self.role._dn_to_id(a.role_dn),
- 'group_id': self.group._dn_to_id(a.group_dn),
- 'project_id': self.project._dn_to_id(a.project_dn)}
- role_assignments.append(assignment)
+
+ # Since the LDAP backend does not support assignments to domains, if
+ # the request is to filter by domain, then the answer is guaranteed
+ # to be an empty list.
+ if not domain_id:
+ for a in self.role.list_role_assignments(self.project.tree_dn):
+ if isinstance(a, UserRoleAssociation):
+ assignment = {
+ 'role_id': self.role._dn_to_id(a.role_dn),
+ 'user_id': self.user._dn_to_id(a.user_dn),
+ 'project_id': self.project._dn_to_id(a.project_dn)}
+ else:
+ assignment = {
+ 'role_id': self.role._dn_to_id(a.role_dn),
+ 'group_id': self.group._dn_to_id(a.group_dn),
+ 'project_id': self.project._dn_to_id(a.project_dn)}
+
+ if role_id and assignment['role_id'] != role_id:
+ continue
+ if user_id and assignment.get('user_id') != user_id:
+ continue
+ if group_ids and assignment.get('group_id') not in group_ids:
+ continue
+ if project_ids and assignment['project_id'] not in project_ids:
+ continue
+
+ role_assignments.append(assignment)
+
return role_assignments
def delete_project_assignments(self, project_id):
@@ -313,9 +331,7 @@ class ProjectApi(common_ldap.ProjectLdapStructureMixin,
or self.DEFAULT_MEMBER_ATTRIBUTE)
def get_user_projects(self, user_dn, associations):
- """Returns list of tenants a user has access to
- """
-
+ """Returns the list of tenants to which a user has access."""
project_ids = set()
for assoc in associations:
project_ids.add(self._dn_to_id(assoc.project_dn))
@@ -497,9 +513,7 @@ class RoleApi(ldap_role.RoleLdapStructureMixin, common_ldap.BaseLdap):
self.id_attr: role_id})
def list_role_assignments(self, project_tree_dn):
- """Returns a list of all the role assignments linked to project_tree_dn
- attribute.
- """
+ """List the role assignments linked to project_tree_dn attribute."""
try:
roles = self._ldap_get_list(project_tree_dn, ldap.SCOPE_SUBTREE,
attrlist=[self.member_attribute])
diff --git a/keystone-moon/keystone/assignment/backends/sql.py b/keystone-moon/keystone/assignment/backends/sql.py
index 2de6ca60..89ff64b5 100644
--- a/keystone-moon/keystone/assignment/backends/sql.py
+++ b/keystone-moon/keystone/assignment/backends/sql.py
@@ -14,7 +14,6 @@
from oslo_config import cfg
from oslo_log import log
-import six
import sqlalchemy
from sqlalchemy.sql.expression import false
@@ -53,10 +52,10 @@ class AssignmentType(object):
class Assignment(keystone_assignment.Driver):
def default_role_driver(self):
- return "keystone.assignment.role_backends.sql.Role"
+ return 'sql'
def default_resource_driver(self):
- return 'keystone.resource.backends.sql.Resource'
+ return 'sql'
def list_user_ids_for_project(self, tenant_id):
with sql.transaction() as session:
@@ -336,7 +335,62 @@ class Assignment(keystone_assignment.Driver):
'Cannot remove role that has not been granted, %s') %
role_id)
- def list_role_assignments(self):
+ def _get_user_assignment_types(self):
+ return [AssignmentType.USER_PROJECT, AssignmentType.USER_DOMAIN]
+
+ def _get_group_assignment_types(self):
+ return [AssignmentType.GROUP_PROJECT, AssignmentType.GROUP_DOMAIN]
+
+ def _get_project_assignment_types(self):
+ return [AssignmentType.USER_PROJECT, AssignmentType.GROUP_PROJECT]
+
+ def _get_domain_assignment_types(self):
+ return [AssignmentType.USER_DOMAIN, AssignmentType.GROUP_DOMAIN]
+
+ def _get_assignment_types(self, user, group, project, domain):
+ """Returns a list of role assignment types based on provided entities
+
+ If one of user or group (the "actor") as well as one of project or
+ domain (the "target") are provided, the list will contain the role
+ assignment type for that specific pair of actor and target.
+
+ If only an actor or target is provided, the list will contain the
+ role assignment types that satisfy the specified entity.
+
+ For example, if user and project are provided, the return will be:
+
+ [AssignmentType.USER_PROJECT]
+
+ However, if only user was provided, the return would be:
+
+ [AssignmentType.USER_PROJECT, AssignmentType.USER_DOMAIN]
+
+ It is not expected that user and group (or project and domain) are
+ specified - but if they are, the most fine-grained value will be
+ chosen (i.e. user over group, project over domain).
+
+ """
+ actor_types = []
+ if user:
+ actor_types = self._get_user_assignment_types()
+ elif group:
+ actor_types = self._get_group_assignment_types()
+
+ target_types = []
+ if project:
+ target_types = self._get_project_assignment_types()
+ elif domain:
+ target_types = self._get_domain_assignment_types()
+
+ if actor_types and target_types:
+ return list(set(actor_types).intersection(target_types))
+
+ return actor_types or target_types
+
+ def list_role_assignments(self, role_id=None,
+ user_id=None, group_ids=None,
+ domain_id=None, project_ids=None,
+ inherited_to_projects=None):
def denormalize_role(ref):
assignment = {}
@@ -362,8 +416,35 @@ class Assignment(keystone_assignment.Driver):
return assignment
with sql.transaction() as session:
- refs = session.query(RoleAssignment).all()
- return [denormalize_role(ref) for ref in refs]
+ assignment_types = self._get_assignment_types(
+ user_id, group_ids, project_ids, domain_id)
+
+ targets = None
+ if project_ids:
+ targets = project_ids
+ elif domain_id:
+ targets = [domain_id]
+
+ actors = None
+ if group_ids:
+ actors = group_ids
+ elif user_id:
+ actors = [user_id]
+
+ query = session.query(RoleAssignment)
+
+ if role_id:
+ query = query.filter_by(role_id=role_id)
+ if actors:
+ query = query.filter(RoleAssignment.actor_id.in_(actors))
+ if targets:
+ query = query.filter(RoleAssignment.target_id.in_(targets))
+ if assignment_types:
+ query = query.filter(RoleAssignment.type.in_(assignment_types))
+ if inherited_to_projects is not None:
+ query = query.filter_by(inherited=inherited_to_projects)
+
+ return [denormalize_role(ref) for ref in query.all()]
def delete_project_assignments(self, project_id):
with sql.transaction() as session:
@@ -377,13 +458,13 @@ class Assignment(keystone_assignment.Driver):
q = q.filter_by(role_id=role_id)
q.delete(False)
- def delete_user(self, user_id):
+ def delete_user_assignments(self, user_id):
with sql.transaction() as session:
q = session.query(RoleAssignment)
q = q.filter_by(actor_id=user_id)
q.delete(False)
- def delete_group(self, group_id):
+ def delete_group_assignments(self, group_id):
with sql.transaction() as session:
q = session.query(RoleAssignment)
q = q.filter_by(actor_id=group_id)
@@ -399,12 +480,15 @@ class RoleAssignment(sql.ModelBase, sql.DictBase):
AssignmentType.USER_DOMAIN, AssignmentType.GROUP_DOMAIN,
name='type'),
nullable=False)
- actor_id = sql.Column(sql.String(64), nullable=False, index=True)
+ actor_id = sql.Column(sql.String(64), nullable=False)
target_id = sql.Column(sql.String(64), nullable=False)
role_id = sql.Column(sql.String(64), nullable=False)
inherited = sql.Column(sql.Boolean, default=False, nullable=False)
- __table_args__ = (sql.PrimaryKeyConstraint('type', 'actor_id', 'target_id',
- 'role_id'), {})
+ __table_args__ = (
+ sql.PrimaryKeyConstraint('type', 'actor_id', 'target_id', 'role_id',
+ 'inherited'),
+ sql.Index('ix_actor_id', 'actor_id'),
+ )
def to_dict(self):
"""Override parent to_dict() method with a simpler implementation.
@@ -412,4 +496,4 @@ class RoleAssignment(sql.ModelBase, sql.DictBase):
RoleAssignment doesn't have non-indexed 'extra' attributes, so the
parent implementation is not applicable.
"""
- return dict(six.iteritems(self))
+ return dict(self.items())
diff --git a/keystone-moon/keystone/assignment/controllers.py b/keystone-moon/keystone/assignment/controllers.py
index ff27fd36..d33dce70 100644
--- a/keystone-moon/keystone/assignment/controllers.py
+++ b/keystone-moon/keystone/assignment/controllers.py
@@ -15,7 +15,6 @@
"""Workflow Logic the Assignment service."""
-import copy
import functools
import uuid
@@ -26,10 +25,10 @@ from six.moves import urllib
from keystone.assignment import schema
from keystone.common import controller
from keystone.common import dependency
+from keystone.common import utils
from keystone.common import validation
from keystone import exception
-from keystone.i18n import _, _LW
-from keystone.models import token_model
+from keystone.i18n import _
from keystone import notifications
@@ -51,18 +50,11 @@ class TenantAssignment(controller.V2Controller):
Doesn't care about token scopedness.
"""
- try:
- token_data = self.token_provider_api.validate_token(
- context['token_id'])
- token_ref = token_model.KeystoneToken(token_id=context['token_id'],
- token_data=token_data)
- except exception.NotFound as e:
- LOG.warning(_LW('Authentication failed: %s'), e)
- raise exception.Unauthorized(e)
+ token_ref = utils.get_token_ref(context)
tenant_refs = (
self.assignment_api.list_projects_for_user(token_ref.user_id))
- tenant_refs = [self.filter_domain_id(ref) for ref in tenant_refs
+ tenant_refs = [self.v3_to_v2_project(ref) for ref in tenant_refs
if ref['domain_id'] == CONF.identity.default_domain_id]
params = {
'limit': context['query_string'].get('limit'),
@@ -107,7 +99,14 @@ class Role(controller.V2Controller):
msg = _('Name field is required and cannot be empty')
raise exception.ValidationError(message=msg)
- role_id = uuid.uuid4().hex
+ if role['name'] == CONF.member_role_name:
+ # Use the configured member role ID when creating the configured
+ # member role name. This avoids the potential of creating a
+ # "member" role with an unexpected ID.
+ role_id = CONF.member_role_id
+ else:
+ role_id = uuid.uuid4().hex
+
role['id'] = role_id
role_ref = self.role_api.create_role(role_id, role)
return {'role': role_ref}
@@ -152,8 +151,8 @@ class RoleAssignmentV2(controller.V2Controller):
"""
self.assert_admin(context)
if tenant_id is None:
- raise exception.NotImplemented(message='User roles not supported: '
- 'tenant_id required')
+ raise exception.NotImplemented(
+ message=_('User roles not supported: tenant_id required'))
self.assignment_api.add_role_to_user_and_project(
user_id, tenant_id, role_id)
@@ -171,8 +170,8 @@ class RoleAssignmentV2(controller.V2Controller):
"""
self.assert_admin(context)
if tenant_id is None:
- raise exception.NotImplemented(message='User roles not supported: '
- 'tenant_id required')
+ raise exception.NotImplemented(
+ message=_('User roles not supported: tenant_id required'))
# This still has the weird legacy semantics that adding a role to
# a user also adds them to a tenant, so we must follow up on that
@@ -282,7 +281,16 @@ class RoleV3(controller.V3Controller):
@controller.protected()
@validation.validated(schema.role_create, 'role')
def create_role(self, context, role):
- ref = self._assign_unique_id(self._normalize_dict(role))
+ if role['name'] == CONF.member_role_name:
+ # Use the configured member role ID when creating the configured
+ # member role name. This avoids the potential of creating a
+ # "member" role with an unexpected ID.
+ role['id'] = CONF.member_role_id
+ else:
+ role = self._assign_unique_id(role)
+
+ ref = self._normalize_dict(role)
+
initiator = notifications._get_request_audit_info(context)
ref = self.role_api.create_role(ref['id'], ref, initiator)
return RoleV3.wrap_member(context, ref)
@@ -452,16 +460,25 @@ class RoleAssignmentV3(controller.V3Controller):
actor (e.g. user or group), target (e.g. domain or project) and role.
If it is an inherited role, then this is also indicated. Examples:
+ For a non-inherited expanded assignment from group membership:
{'user_id': user_id,
- 'project_id': domain_id,
- 'role_id': role_id}
+ 'project_id': project_id,
+ 'role_id': role_id,
+ 'indirect': {'group_id': group_id}}
- or, for an inherited role:
+ or, for a project inherited role:
{'user_id': user_id,
- 'domain_id': domain_id,
+ 'project_id': project_id,
'role_id': role_id,
- 'inherited_to_projects': true}
+ 'indirect': {'project_id': parent_id}}
+
+ It is possible to deduce if a role assignment came from group
+ membership if it has both 'user_id' in the main body of the dict and
+ 'group_id' in the 'indirect' subdict, as well as it is possible to
+ deduce if it has come from inheritance if it contains both a
+ 'project_id' in the main body of the dict and 'parent_id' in the
+ 'indirect' subdict.
This function maps this into the format to be returned via the API,
e.g. for the second example above:
@@ -471,262 +488,71 @@ class RoleAssignmentV3(controller.V3Controller):
{'id': user_id}
},
'scope': {
- 'domain': {
- {'id': domain_id}
+ 'project': {
+ {'id': project_id}
},
- 'OS-INHERIT:inherited_to': 'projects
+ 'OS-INHERIT:inherited_to': 'projects'
},
'role': {
{'id': role_id}
},
'links': {
- 'assignment': '/domains/domain_id/users/user_id/roles/'
- 'role_id/inherited_to_projects'
+ 'assignment': '/OS-INHERIT/projects/parent_id/users/user_id/'
+ 'roles/role_id/inherited_to_projects'
}
}
"""
- formatted_entity = {}
- suffix = ""
- if 'user_id' in entity:
- formatted_entity['user'] = {'id': entity['user_id']}
- actor_link = 'users/%s' % entity['user_id']
- if 'group_id' in entity:
- formatted_entity['group'] = {'id': entity['group_id']}
- actor_link = 'groups/%s' % entity['group_id']
- if 'role_id' in entity:
- formatted_entity['role'] = {'id': entity['role_id']}
+ formatted_entity = {'links': {}}
+ inherited_assignment = entity.get('inherited_to_projects')
+
if 'project_id' in entity:
formatted_entity['scope'] = (
{'project': {'id': entity['project_id']}})
- if 'inherited_to_projects' in entity:
- formatted_entity['scope']['OS-INHERIT:inherited_to'] = (
- 'projects')
- target_link = '/OS-INHERIT/projects/%s' % entity['project_id']
- suffix = '/inherited_to_projects'
- else:
- target_link = '/projects/%s' % entity['project_id']
- if 'domain_id' in entity:
- formatted_entity['scope'] = (
- {'domain': {'id': entity['domain_id']}})
- if 'inherited_to_projects' in entity:
- formatted_entity['scope']['OS-INHERIT:inherited_to'] = (
- 'projects')
- target_link = '/OS-INHERIT/domains/%s' % entity['domain_id']
- suffix = '/inherited_to_projects'
- else:
- target_link = '/domains/%s' % entity['domain_id']
- formatted_entity.setdefault('links', {})
-
- path = '%(target)s/%(actor)s/roles/%(role)s%(suffix)s' % {
- 'target': target_link,
- 'actor': actor_link,
- 'role': entity['role_id'],
- 'suffix': suffix}
- formatted_entity['links']['assignment'] = self.base_url(context, path)
-
- return formatted_entity
- def _expand_indirect_assignments(self, context, refs):
- """Processes entity list into all-direct assignments.
-
- For any group role assignments in the list, create a role assignment
- entity for each member of that group, and then remove the group
- assignment entity itself from the list.
+ if 'domain_id' in entity.get('indirect', {}):
+ inherited_assignment = True
+ formatted_link = ('/domains/%s' %
+ entity['indirect']['domain_id'])
+ elif 'project_id' in entity.get('indirect', {}):
+ inherited_assignment = True
+ formatted_link = ('/projects/%s' %
+ entity['indirect']['project_id'])
+ else:
+ formatted_link = '/projects/%s' % entity['project_id']
+ elif 'domain_id' in entity:
+ formatted_entity['scope'] = {'domain': {'id': entity['domain_id']}}
+ formatted_link = '/domains/%s' % entity['domain_id']
- If the OS-INHERIT extension is enabled, then honor any inherited
- roles on the domain by creating the equivalent on all projects
- owned by the domain.
+ if 'user_id' in entity:
+ formatted_entity['user'] = {'id': entity['user_id']}
- For any new entity created by virtue of group membership, add in an
- additional link to that membership.
+ if 'group_id' in entity.get('indirect', {}):
+ membership_url = (
+ self.base_url(context, '/groups/%s/users/%s' % (
+ entity['indirect']['group_id'], entity['user_id'])))
+ formatted_entity['links']['membership'] = membership_url
+ formatted_link += '/groups/%s' % entity['indirect']['group_id']
+ else:
+ formatted_link += '/users/%s' % entity['user_id']
+ elif 'group_id' in entity:
+ formatted_entity['group'] = {'id': entity['group_id']}
+ formatted_link += '/groups/%s' % entity['group_id']
- """
- def _get_group_members(ref):
- """Get a list of group members.
+ formatted_entity['role'] = {'id': entity['role_id']}
+ formatted_link += '/roles/%s' % entity['role_id']
- Get the list of group members. If this fails with
- GroupNotFound, then log this as a warning, but allow
- overall processing to continue.
+ if inherited_assignment:
+ formatted_entity['scope']['OS-INHERIT:inherited_to'] = (
+ 'projects')
+ formatted_link = ('/OS-INHERIT%s/inherited_to_projects' %
+ formatted_link)
- """
- try:
- members = self.identity_api.list_users_in_group(
- ref['group']['id'])
- except exception.GroupNotFound:
- members = []
- # The group is missing, which should not happen since
- # group deletion should remove any related assignments, so
- # log a warning
- target = 'Unknown'
- # Should always be a domain or project, but since to get
- # here things have gone astray, let's be cautious.
- if 'scope' in ref:
- if 'domain' in ref['scope']:
- dom_id = ref['scope']['domain'].get('id', 'Unknown')
- target = 'Domain: %s' % dom_id
- elif 'project' in ref['scope']:
- proj_id = ref['scope']['project'].get('id', 'Unknown')
- target = 'Project: %s' % proj_id
- role_id = 'Unknown'
- if 'role' in ref and 'id' in ref['role']:
- role_id = ref['role']['id']
- LOG.warning(
- _LW('Group %(group)s not found for role-assignment - '
- '%(target)s with Role: %(role)s'), {
- 'group': ref['group']['id'], 'target': target,
- 'role': role_id})
- return members
-
- def _build_user_assignment_equivalent_of_group(
- user, group_id, template):
- """Create a user assignment equivalent to the group one.
-
- The template has had the 'group' entity removed, so
- substitute a 'user' one. The 'assignment' link stays as it is,
- referring to the group assignment that led to this role.
- A 'membership' link is added that refers to this particular
- user's membership of this group.
-
- """
- user_entry = copy.deepcopy(template)
- user_entry['user'] = {'id': user['id']}
- user_entry['links']['membership'] = (
- self.base_url(context, '/groups/%s/users/%s' %
- (group_id, user['id'])))
- return user_entry
-
- def _build_project_equivalent_of_user_target_role(
- project_id, target_id, target_type, template):
- """Create a user project assignment equivalent to the domain one.
-
- The template has had the 'domain' entity removed, so
- substitute a 'project' one, modifying the 'assignment' link
- to match.
-
- """
- project_entry = copy.deepcopy(template)
- project_entry['scope']['project'] = {'id': project_id}
- project_entry['links']['assignment'] = (
- self.base_url(
- context,
- '/OS-INHERIT/%s/%s/users/%s/roles/%s'
- '/inherited_to_projects' % (
- target_type, target_id, project_entry['user']['id'],
- project_entry['role']['id'])))
- return project_entry
-
- def _build_project_equivalent_of_group_target_role(
- user_id, group_id, project_id,
- target_id, target_type, template):
- """Create a user project equivalent to the domain group one.
-
- The template has had the 'domain' and 'group' entities removed, so
- substitute a 'user-project' one, modifying the 'assignment' link
- to match.
-
- """
- project_entry = copy.deepcopy(template)
- project_entry['user'] = {'id': user_id}
- project_entry['scope']['project'] = {'id': project_id}
- project_entry['links']['assignment'] = (
- self.base_url(context,
- '/OS-INHERIT/%s/%s/groups/%s/roles/%s'
- '/inherited_to_projects' % (
- target_type, target_id, group_id,
- project_entry['role']['id'])))
- project_entry['links']['membership'] = (
- self.base_url(context, '/groups/%s/users/%s' %
- (group_id, user_id)))
- return project_entry
-
- # Scan the list of entities for any assignments that need to be
- # expanded.
- #
- # If the OS-INERIT extension is enabled, the refs lists may
- # contain roles to be inherited from domain to project, so expand
- # these as well into project equivalents
- #
- # For any regular group entries, expand these into user entries based
- # on membership of that group.
- #
- # Due to the potentially large expansions, rather than modify the
- # list we are enumerating, we build a new one as we go.
- #
-
- new_refs = []
- for r in refs:
- if 'OS-INHERIT:inherited_to' in r['scope']:
- if 'domain' in r['scope']:
- # It's an inherited domain role - so get the list of
- # projects owned by this domain.
- project_ids = (
- [x['id'] for x in
- self.resource_api.list_projects_in_domain(
- r['scope']['domain']['id'])])
- base_entry = copy.deepcopy(r)
- target_type = 'domains'
- target_id = base_entry['scope']['domain']['id']
- base_entry['scope'].pop('domain')
- else:
- # It's an inherited project role - so get the list of
- # projects in this project subtree.
- project_id = r['scope']['project']['id']
- project_ids = (
- [x['id'] for x in
- self.resource_api.list_projects_in_subtree(
- project_id)])
- base_entry = copy.deepcopy(r)
- target_type = 'projects'
- target_id = base_entry['scope']['project']['id']
- base_entry['scope'].pop('project')
-
- # For each project, create an equivalent role assignment
- for p in project_ids:
- # If it's a group assignment, then create equivalent user
- # roles based on membership of the group
- if 'group' in base_entry:
- members = _get_group_members(base_entry)
- sub_entry = copy.deepcopy(base_entry)
- group_id = sub_entry['group']['id']
- sub_entry.pop('group')
- for m in members:
- new_entry = (
- _build_project_equivalent_of_group_target_role(
- m['id'], group_id, p,
- target_id, target_type, sub_entry))
- new_refs.append(new_entry)
- else:
- new_entry = (
- _build_project_equivalent_of_user_target_role(
- p, target_id, target_type, base_entry))
- new_refs.append(new_entry)
- elif 'group' in r:
- # It's a non-inherited group role assignment, so get the list
- # of members.
- members = _get_group_members(r)
-
- # Now replace that group role assignment entry with an
- # equivalent user role assignment for each of the group members
- base_entry = copy.deepcopy(r)
- group_id = base_entry['group']['id']
- base_entry.pop('group')
- for m in members:
- user_entry = _build_user_assignment_equivalent_of_group(
- m, group_id, base_entry)
- new_refs.append(user_entry)
- else:
- new_refs.append(r)
+ formatted_entity['links']['assignment'] = self.base_url(context,
+ formatted_link)
- return new_refs
-
- def _filter_inherited(self, entry):
- if ('inherited_to_projects' in entry and
- not CONF.os_inherit.enabled):
- return False
- else:
- return True
+ return formatted_entity
def _assert_effective_filters(self, inherited, group, domain):
"""Assert that useless filter combinations are avoided.
@@ -762,13 +588,28 @@ class RoleAssignmentV3(controller.V3Controller):
'scope.domain.id', 'scope.project.id',
'scope.OS-INHERIT:inherited_to', 'user.id')
def list_role_assignments(self, context, filters):
+ """List role assignments to user and groups on domains and projects.
+
+ Return a list of all existing role assignments in the system, filtered
+ by assignments attributes, if provided.
- # TODO(henry-nash): This implementation uses the standard filtering
- # in the V3.wrap_collection. Given the large number of individual
- # assignments, this is pretty inefficient. An alternative would be
- # to pass the filters into the driver call, so that the list size is
- # kept a minimum.
+    If the effective option is used and the OS-INHERIT extension is enabled,
+ following functions will be applied:
+ 1) For any group role assignment on a target, replace it by a set of
+ role assignments containing one for each user of that group on that
+ target;
+ 2) For any inherited role assignment for an actor on a target, replace
+ it by a set of role assignments for that actor on every project under
+ that target.
+ It means that, if effective mode is used, no group or domain inherited
+ assignments will be present in the resultant list. Thus, combining
+ effective with them is invalid.
+
+ As a role assignment contains only one actor and one target, providing
+ both user and group ids or domain and project ids is invalid as well.
+
+ """
params = context['query_string']
effective = 'effective' in params and (
self.query_filter_is_true(params['effective']))
@@ -791,17 +632,17 @@ class RoleAssignmentV3(controller.V3Controller):
domain=params.get(
'scope.domain.id'))
- hints = self.build_driver_hints(context, filters)
- refs = self.assignment_api.list_role_assignments()
- formatted_refs = (
- [self._format_entity(context, x) for x in refs
- if self._filter_inherited(x)])
+ refs = self.assignment_api.list_role_assignments(
+ role_id=params.get('role.id'),
+ user_id=params.get('user.id'),
+ group_id=params.get('group.id'),
+ domain_id=params.get('scope.domain.id'),
+ project_id=params.get('scope.project.id'),
+ inherited=inherited, effective=effective)
- if effective:
- formatted_refs = self._expand_indirect_assignments(context,
- formatted_refs)
+ formatted_refs = [self._format_entity(context, ref) for ref in refs]
- return self.wrap_collection(context, formatted_refs, hints=hints)
+ return self.wrap_collection(context, formatted_refs)
@controller.protected()
def get_role_assignment(self, context):
diff --git a/keystone-moon/keystone/assignment/core.py b/keystone-moon/keystone/assignment/core.py
index 0f9c03e9..a001e6b1 100644
--- a/keystone-moon/keystone/assignment/core.py
+++ b/keystone-moon/keystone/assignment/core.py
@@ -12,9 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Main entry point into the assignment service."""
+"""Main entry point into the Assignment service."""
import abc
+import copy
from oslo_config import cfg
from oslo_log import log
@@ -28,7 +29,6 @@ from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LI
from keystone import notifications
-from keystone.openstack.common import versionutils
CONF = cfg.CONF
@@ -36,40 +36,6 @@ LOG = log.getLogger(__name__)
MEMOIZE = cache.get_memoization_decorator(section='role')
-def deprecated_to_role_api(f):
- """Specialized deprecation wrapper for assignment to role api.
-
- This wraps the standard deprecation wrapper and fills in the method
- names automatically.
-
- """
- @six.wraps(f)
- def wrapper(*args, **kwargs):
- x = versionutils.deprecated(
- what='assignment.' + f.__name__ + '()',
- as_of=versionutils.deprecated.KILO,
- in_favor_of='role.' + f.__name__ + '()')
- return x(f)
- return wrapper()
-
-
-def deprecated_to_resource_api(f):
- """Specialized deprecation wrapper for assignment to resource api.
-
- This wraps the standard deprecation wrapper and fills in the method
- names automatically.
-
- """
- @six.wraps(f)
- def wrapper(*args, **kwargs):
- x = versionutils.deprecated(
- what='assignment.' + f.__name__ + '()',
- as_of=versionutils.deprecated.KILO,
- in_favor_of='resource.' + f.__name__ + '()')
- return x(f)
- return wrapper()
-
-
@dependency.provider('assignment_api')
@dependency.requires('credential_api', 'identity_api', 'resource_api',
'revoke_api', 'role_api')
@@ -80,6 +46,9 @@ class Manager(manager.Manager):
dynamically calls the backend.
"""
+
+ driver_namespace = 'keystone.assignment'
+
_PROJECT = 'project'
_ROLE_REMOVED_FROM_USER = 'role_removed_from_user'
_INVALIDATION_USER_PROJECT_TOKENS = 'invalidate_user_project_tokens'
@@ -129,7 +98,7 @@ class Manager(manager.Manager):
"""
def _get_group_project_roles(user_id, project_ref):
group_ids = self._get_group_ids_for_user_id(user_id)
- return self.driver.list_role_ids_for_groups_on_project(
+ return self.list_role_ids_for_groups_on_project(
group_ids,
project_ref['id'],
project_ref['domain_id'],
@@ -155,7 +124,8 @@ class Manager(manager.Manager):
except (exception.MetadataNotFound, exception.NotImplemented):
pass
# As well inherited roles from parent projects
- for p in self.list_project_parents(project_ref['id']):
+ for p in self.resource_api.list_project_parents(
+ project_ref['id']):
p_roles = self.list_grants(
user_id=user_id, project_id=p['id'],
inherited_to_projects=True)
@@ -207,7 +177,7 @@ class Manager(manager.Manager):
return self._roles_from_role_dicts(
metadata_ref.get('roles', {}), False)
- self.get_domain(domain_id)
+ self.resource_api.get_domain(domain_id)
user_role_list = _get_user_domain_roles(user_id, domain_id)
group_role_list = _get_group_domain_roles(user_id, domain_id)
# Use set() to process the list to remove any duplicates
@@ -218,11 +188,11 @@ class Manager(manager.Manager):
if project_id is not None:
project = self.resource_api.get_project(project_id)
- role_ids = self.driver.list_role_ids_for_groups_on_project(
+ role_ids = self.list_role_ids_for_groups_on_project(
group_ids, project_id, project['domain_id'],
self._list_parent_ids_of_project(project_id))
elif domain_id is not None:
- role_ids = self.driver.list_role_ids_for_groups_on_domain(
+ role_ids = self.list_role_ids_for_groups_on_domain(
group_ids, domain_id)
else:
raise AttributeError(_("Must specify either domain or project"))
@@ -261,10 +231,24 @@ class Manager(manager.Manager):
tenant_id,
CONF.member_role_id)
- def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
- self.resource_api.get_project(tenant_id)
+ @notifications.role_assignment('created')
+ def _add_role_to_user_and_project_adapter(self, role_id, user_id=None,
+ group_id=None, domain_id=None,
+ project_id=None,
+ inherited_to_projects=False,
+ context=None):
+
+ # The parameters for this method must match the parameters for
+ # create_grant so that the notifications.role_assignment decorator
+ # will work.
+
+ self.resource_api.get_project(project_id)
self.role_api.get_role(role_id)
- self.driver.add_role_to_user_and_project(user_id, tenant_id, role_id)
+ self.driver.add_role_to_user_and_project(user_id, project_id, role_id)
+
+ def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
+ self._add_role_to_user_and_project_adapter(
+ role_id, user_id=user_id, project_id=tenant_id)
def remove_user_from_project(self, tenant_id, user_id):
"""Remove user from a tenant
@@ -299,7 +283,7 @@ class Manager(manager.Manager):
# optimization with the various backend technologies (SQL, LDAP etc.).
group_ids = self._get_group_ids_for_user_id(user_id)
- project_ids = self.driver.list_project_ids_for_user(
+ project_ids = self.list_project_ids_for_user(
user_id, group_ids, hints or driver_hints.Hints())
if not CONF.os_inherit.enabled:
@@ -309,7 +293,7 @@ class Manager(manager.Manager):
# inherited role (direct or group) on any parent project, in which
# case we must add in all the projects in that parent's subtree.
project_ids = set(project_ids)
- project_ids_inherited = self.driver.list_project_ids_for_user(
+ project_ids_inherited = self.list_project_ids_for_user(
user_id, group_ids, hints or driver_hints.Hints(), inherited=True)
for proj_id in project_ids_inherited:
project_ids.update(
@@ -317,7 +301,7 @@ class Manager(manager.Manager):
self.resource_api.list_projects_in_subtree(proj_id)))
# Now do the same for any domain inherited roles
- domain_ids = self.driver.list_domain_ids_for_user(
+ domain_ids = self.list_domain_ids_for_user(
user_id, group_ids, hints or driver_hints.Hints(),
inherited=True)
project_ids.update(
@@ -335,33 +319,42 @@ class Manager(manager.Manager):
# projects for a user is pushed down into the driver to enable
# optimization with the various backend technologies (SQL, LDAP etc.).
group_ids = self._get_group_ids_for_user_id(user_id)
- domain_ids = self.driver.list_domain_ids_for_user(
+ domain_ids = self.list_domain_ids_for_user(
user_id, group_ids, hints or driver_hints.Hints())
return self.resource_api.list_domains_from_ids(domain_ids)
def list_domains_for_groups(self, group_ids):
- domain_ids = self.driver.list_domain_ids_for_groups(group_ids)
+ domain_ids = self.list_domain_ids_for_groups(group_ids)
return self.resource_api.list_domains_from_ids(domain_ids)
def list_projects_for_groups(self, group_ids):
project_ids = (
- self.driver.list_project_ids_for_groups(group_ids,
- driver_hints.Hints()))
+ self.list_project_ids_for_groups(group_ids, driver_hints.Hints()))
if not CONF.os_inherit.enabled:
return self.resource_api.list_projects_from_ids(project_ids)
- # Inherited roles are enabled, so check to see if these groups have any
- # roles on any domain, in which case we must add in all the projects
- # in that domain.
+ # os_inherit extension is enabled, so check to see if these groups have
+ # any inherited role assignment on: i) any domain, in which case we
+ # must add in all the projects in that domain; ii) any project, in
+ # which case we must add in all the subprojects under that project in
+ # the hierarchy.
- domain_ids = self.driver.list_domain_ids_for_groups(
- group_ids, inherited=True)
+ domain_ids = self.list_domain_ids_for_groups(group_ids, inherited=True)
project_ids_from_domains = (
self.resource_api.list_project_ids_from_domain_ids(domain_ids))
+ parents_ids = self.list_project_ids_for_groups(group_ids,
+ driver_hints.Hints(),
+ inherited=True)
+
+ subproject_ids = []
+ for parent_id in parents_ids:
+ subtree = self.resource_api.list_projects_in_subtree(parent_id)
+ subproject_ids += [subproject['id'] for subproject in subtree]
+
return self.resource_api.list_projects_from_ids(
- list(set(project_ids + project_ids_from_domains)))
+ list(set(project_ids + project_ids_from_domains + subproject_ids)))
def list_role_assignments_for_role(self, role_id=None):
# NOTE(henry-nash): Currently the efficiency of the key driver
@@ -374,17 +367,37 @@ class Manager(manager.Manager):
return [r for r in self.driver.list_role_assignments()
if r['role_id'] == role_id]
- def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
- self.driver.remove_role_from_user_and_project(user_id, tenant_id,
+ @notifications.role_assignment('deleted')
+ def _remove_role_from_user_and_project_adapter(self, role_id, user_id=None,
+ group_id=None,
+ domain_id=None,
+ project_id=None,
+ inherited_to_projects=False,
+ context=None):
+
+ # The parameters for this method must match the parameters for
+ # delete_grant so that the notifications.role_assignment decorator
+ # will work.
+
+ self.driver.remove_role_from_user_and_project(user_id, project_id,
role_id)
self.identity_api.emit_invalidate_user_token_persistence(user_id)
self.revoke_api.revoke_by_grant(role_id, user_id=user_id,
- project_id=tenant_id)
+ project_id=project_id)
+
+ def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
+ self._remove_role_from_user_and_project_adapter(
+ role_id, user_id=user_id, project_id=tenant_id)
@notifications.internal(notifications.INVALIDATE_USER_TOKEN_PERSISTENCE)
def _emit_invalidate_user_token_persistence(self, user_id):
self.identity_api.emit_invalidate_user_token_persistence(user_id)
+ def _emit_invalidate_grant_token_persistence(self, user_id, project_id):
+ self.identity_api.emit_invalidate_grant_token_persistence(
+ {'user_id': user_id, 'project_id': project_id}
+ )
+
@notifications.role_assignment('created')
def create_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
@@ -405,7 +418,7 @@ class Manager(manager.Manager):
self.resource_api.get_domain(domain_id)
if project_id:
self.resource_api.get_project(project_id)
- self.driver.check_grant_role_id(
+ self.check_grant_role_id(
role_id, user_id, group_id, domain_id, project_id,
inherited_to_projects)
return role_ref
@@ -417,11 +430,15 @@ class Manager(manager.Manager):
self.resource_api.get_domain(domain_id)
if project_id:
self.resource_api.get_project(project_id)
- grant_ids = self.driver.list_grant_role_ids(
+ grant_ids = self.list_grant_role_ids(
user_id, group_id, domain_id, project_id, inherited_to_projects)
return self.role_api.list_roles_from_ids(grant_ids)
@notifications.role_assignment('deleted')
+ def _emit_revoke_user_grant(self, role_id, user_id, domain_id, project_id,
+ inherited_to_projects, context):
+ self._emit_invalidate_grant_token_persistence(user_id, project_id)
+
def delete_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False, context=None):
@@ -430,17 +447,29 @@ class Manager(manager.Manager):
role_id=role_id,
domain_id=domain_id,
project_id=project_id)
+ self._emit_revoke_user_grant(
+ role_id, user_id, domain_id, project_id,
+ inherited_to_projects, context)
else:
try:
- # NOTE(morganfainberg): The user ids are the important part
- # for invalidating tokens below, so extract them here.
- for user in self.identity_api.list_users_in_group(group_id):
- if user['id'] != user_id:
- self._emit_invalidate_user_token_persistence(
- user['id'])
- self.revoke_api.revoke_by_grant(
- user_id=user['id'], role_id=role_id,
- domain_id=domain_id, project_id=project_id)
+ # Group may contain a lot of users so revocation will be
+ # by role & domain/project
+ if domain_id is None:
+ self.revoke_api.revoke_by_project_role_assignment(
+ project_id, role_id
+ )
+ else:
+ self.revoke_api.revoke_by_domain_role_assignment(
+ domain_id, role_id
+ )
+ if CONF.token.revoke_by_id:
+ # NOTE(morganfainberg): The user ids are the important part
+ # for invalidating tokens below, so extract them here.
+ for user in self.identity_api.list_users_in_group(
+ group_id):
+ self._emit_revoke_user_grant(
+ role_id, user['id'], domain_id, project_id,
+ inherited_to_projects, context)
except exception.GroupNotFound:
LOG.debug('Group %s not found, no tokens to invalidate.',
group_id)
@@ -457,8 +486,356 @@ class Manager(manager.Manager):
self.resource_api.get_project(project_id)
self.driver.delete_grant(role_id, user_id, group_id, domain_id,
project_id, inherited_to_projects)
- if user_id is not None:
- self._emit_invalidate_user_token_persistence(user_id)
+
+ # The methods _expand_indirect_assignment, _list_direct_role_assignments
+ # and _list_effective_role_assignments below are only used on
+ # list_role_assignments, but they are not in its scope as nested functions
+ # since it would significantly increase McCabe complexity, that should be
+ # kept as it is in order to detect unnecessarily complex code, which is not
+ # this case.
+
+ def _expand_indirect_assignment(self, ref, user_id=None,
+ project_id=None):
+ """Returns a list of expanded role assignments.
+
+        This method is called for each discovered assignment that either needs
+ a group assignment expanded into individual user assignments, or needs
+ an inherited assignment to be applied to its children.
+
+ In all cases, if either user_id and/or project_id is specified, then we
+ filter the result on those values.
+
+ """
+
+ def create_group_assignment(base_ref, user_id):
+ """Creates a group assignment from the provided ref."""
+
+ ref = copy.deepcopy(base_ref)
+
+ ref['user_id'] = user_id
+
+ indirect = ref.setdefault('indirect', {})
+ indirect['group_id'] = ref.pop('group_id')
+
+ return ref
+
+ def expand_group_assignment(ref, user_id):
+ """Expands group role assignment.
+
+ For any group role assignment on a target, it is replaced by a list
+ of role assignments containing one for each user of that group on
+ that target.
+
+ An example of accepted ref is:
+
+ {
+ 'group_id': group_id,
+ 'project_id': project_id,
+ 'role_id': role_id
+ }
+
+ Once expanded, it should be returned as a list of entities like the
+            one below, one for each user_id in the provided group_id.
+
+ {
+ 'user_id': user_id,
+ 'project_id': project_id,
+ 'role_id': role_id,
+ 'indirect' : {
+ 'group_id': group_id
+ }
+ }
+
+ Returned list will be formatted by the Controller, which will
+ deduce a role assignment came from group membership if it has both
+ 'user_id' in the main body of the dict and 'group_id' in indirect
+ subdict.
+
+ """
+ if user_id:
+ return [create_group_assignment(ref, user_id=user_id)]
+
+ return [create_group_assignment(ref, user_id=m['id'])
+ for m in self.identity_api.list_users_in_group(
+ ref['group_id'])]
+
+ def expand_inherited_assignment(ref, user_id, project_id=None):
+ """Expands inherited role assignments.
+
+ If this is a group role assignment on a target, replace it by a
+ list of role assignments containing one for each user of that
+ group, on every project under that target.
+
+ If this is a user role assignment on a target, replace it by a
+ list of role assignments for that user on every project under
+ that target.
+
+ An example of accepted ref is:
+
+ {
+ 'group_id': group_id,
+ 'project_id': parent_id,
+ 'role_id': role_id,
+ 'inherited_to_projects': 'projects'
+ }
+
+ Once expanded, it should be returned as a list of entities like the
+            one below, one for each user_id in the provided group_id and
+ for each subproject_id in the project_id subtree.
+
+ {
+ 'user_id': user_id,
+ 'project_id': subproject_id,
+ 'role_id': role_id,
+ 'indirect' : {
+ 'group_id': group_id,
+ 'project_id': parent_id
+ }
+ }
+
+ Returned list will be formatted by the Controller, which will
+ deduce a role assignment came from group membership if it has both
+ 'user_id' in the main body of the dict and 'group_id' in the
+ 'indirect' subdict, as well as it is possible to deduce if it has
+ come from inheritance if it contains both a 'project_id' in the
+ main body of the dict and 'parent_id' in the 'indirect' subdict.
+
+ """
+ def create_inherited_assignment(base_ref, project_id):
+ """Creates a project assignment from the provided ref.
+
+ base_ref can either be a project or domain inherited
+ assignment ref.
+
+ """
+ ref = copy.deepcopy(base_ref)
+
+ indirect = ref.setdefault('indirect', {})
+ if ref.get('project_id'):
+ indirect['project_id'] = ref.pop('project_id')
+ else:
+ indirect['domain_id'] = ref.pop('domain_id')
+
+ ref['project_id'] = project_id
+ ref.pop('inherited_to_projects')
+
+ return ref
+
+ # Define expanded project list to which to apply this assignment
+ if project_id:
+ # Since ref is an inherited assignment, it must have come from
+ # the domain or a parent. We only need apply it to the project
+ # requested.
+ project_ids = [project_id]
+ elif ref.get('domain_id'):
+ # A domain inherited assignment, so apply it to all projects
+ # in this domain
+ project_ids = (
+ [x['id'] for x in
+ self.resource_api.list_projects_in_domain(
+ ref['domain_id'])])
+ else:
+ # It must be a project assignment, so apply it to the subtree
+ project_ids = (
+ [x['id'] for x in
+ self.resource_api.list_projects_in_subtree(
+ ref['project_id'])])
+
+ new_refs = []
+ if 'group_id' in ref:
+ # Expand role assignment for all members and for all projects
+ for ref in expand_group_assignment(ref, user_id):
+ new_refs += [create_inherited_assignment(ref, proj_id)
+ for proj_id in project_ids]
+ else:
+ # Expand role assignment for all projects
+ new_refs += [create_inherited_assignment(ref, proj_id)
+ for proj_id in project_ids]
+
+ return new_refs
+
+ if ref.get('inherited_to_projects') == 'projects':
+ return expand_inherited_assignment(ref, user_id, project_id)
+ elif 'group_id' in ref:
+ return expand_group_assignment(ref, user_id)
+ return [ref]
+
+ def _list_effective_role_assignments(self, role_id, user_id, group_id,
+ domain_id, project_id, inherited):
+ """List role assignments in effective mode.
+
+ When using effective mode, besides the direct assignments, the indirect
+ ones that come from grouping or inheritance are retrieved and will then
+ be expanded.
+
+ The resulting list of assignments will be filtered by the provided
+ parameters, although since we are in effective mode, group can never
+ act as a filter (since group assignments are expanded into user roles)
+ and domain can only be filter if we want non-inherited assignments,
+ since domains can't inherit assignments.
+
+ The goal of this method is to only ask the driver for those
+        assignments as could affect the result based on the parameter filters
+ specified, hence avoiding retrieving a huge list.
+
+ """
+
+ def list_role_assignments_for_actor(
+ role_id, inherited, user_id=None,
+ group_ids=None, project_id=None, domain_id=None):
+ """List role assignments for actor on target.
+
+ List direct and indirect assignments for an actor, optionally
+ for a given target (i.e. project or domain).
+
+ :param role_id: List for a specific role, can be None meaning all
+ roles
+ :param inherited: Indicates whether inherited assignments or only
+ direct assignments are required. If None, then
+ both are required.
+ :param user_id: If not None, list only assignments that affect this
+ user.
+ :param group_ids: A list of groups required. Only one of user_id
+ and group_ids can be specified
+ :param project_id: If specified, only include those assignments
+ that affect this project
+ :param domain_id: If specified, only include those assignments
+ that affect this domain - by definition this will
+ not include any inherited assignments
+
+ :returns: List of assignments matching the criteria. Any inherited
+ or group assignments that could affect the resulting
+ response are included.
+
+ """
+
+ # List direct project role assignments
+ project_ids = [project_id] if project_id else None
+
+ non_inherited_refs = []
+ if inherited is False or inherited is None:
+ # Get non inherited assignments
+ non_inherited_refs = self.driver.list_role_assignments(
+ role_id=role_id, domain_id=domain_id,
+ project_ids=project_ids, user_id=user_id,
+ group_ids=group_ids, inherited_to_projects=False)
+
+ inherited_refs = []
+ if inherited is True or inherited is None:
+ # Get inherited assignments
+ if project_id:
+ # If we are filtering by a specific project, then we can
+ # only get inherited assignments from its domain or from
+ # any of its parents.
+
+ # List inherited assignments from the project's domain
+ proj_domain_id = self.resource_api.get_project(
+ project_id)['domain_id']
+ inherited_refs += self.driver.list_role_assignments(
+ role_id=role_id, domain_id=proj_domain_id,
+ user_id=user_id, group_ids=group_ids,
+ inherited_to_projects=True)
+
+ # And those assignments that could be inherited from the
+ # project's parents.
+ parent_ids = [project['id'] for project in
+ self.resource_api.list_project_parents(
+ project_id)]
+ if parent_ids:
+ inherited_refs += self.driver.list_role_assignments(
+ role_id=role_id, project_ids=parent_ids,
+ user_id=user_id, group_ids=group_ids,
+ inherited_to_projects=True)
+ else:
+ # List inherited assignments without filtering by target
+ inherited_refs = self.driver.list_role_assignments(
+ role_id=role_id, user_id=user_id, group_ids=group_ids,
+ inherited_to_projects=True)
+
+ return non_inherited_refs + inherited_refs
+
+ # If filtering by group or inherited domain assignment the list is
+        # guaranteed to be empty
+ if group_id or (domain_id and inherited):
+ return []
+
+ # If filtering by domain, then only non-inherited assignments are
+ # relevant, since domains don't inherit assignments
+ inherited = False if domain_id else inherited
+
+ # List user assignments
+ direct_refs = list_role_assignments_for_actor(
+ role_id=role_id, user_id=user_id, project_id=project_id,
+ domain_id=domain_id, inherited=inherited)
+
+ # And those from the user's groups
+ group_refs = []
+ if user_id:
+ group_ids = self._get_group_ids_for_user_id(user_id)
+ if group_ids:
+ group_refs = list_role_assignments_for_actor(
+ role_id=role_id, project_id=project_id,
+ group_ids=group_ids, domain_id=domain_id,
+ inherited=inherited)
+
+ # Expand grouping and inheritance on retrieved role assignments
+ refs = []
+ for ref in (direct_refs + group_refs):
+ refs += self._expand_indirect_assignment(ref=ref, user_id=user_id,
+ project_id=project_id)
+
+ return refs
+
+ def _list_direct_role_assignments(self, role_id, user_id, group_id,
+ domain_id, project_id, inherited):
+ """List role assignments without applying expansion.
+
+ Returns a list of direct role assignments, where their attributes match
+ the provided filters.
+
+ """
+ group_ids = [group_id] if group_id else None
+ project_ids = [project_id] if project_id else None
+
+ return self.driver.list_role_assignments(
+ role_id=role_id, user_id=user_id, group_ids=group_ids,
+ domain_id=domain_id, project_ids=project_ids,
+ inherited_to_projects=inherited)
+
+ def list_role_assignments(self, role_id=None, user_id=None, group_id=None,
+ domain_id=None, project_id=None, inherited=None,
+ effective=None):
+ """List role assignments, honoring effective mode and provided filters.
+
+ Returns a list of role assignments, where their attributes match the
+ provided filters (role_id, user_id, group_id, domain_id, project_id and
+ inherited). The inherited filter defaults to None, meaning to get both
+ non-inherited and inherited role assignments.
+
+ If effective mode is specified, this means that rather than simply
+ return the assignments that match the filters, any group or
+ inheritance assignments will be expanded. Group assignments will
+ become assignments for all the users in that group, and inherited
+ assignments will be shown on the projects below the assignment point.
+ Think of effective mode as being the list of assignments that actually
+ affect a user, for example the roles that would be placed in a token.
+
+ If OS-INHERIT extension is disabled or the used driver does not support
+ inherited roles retrieval, inherited role assignments will be ignored.
+
+ """
+
+ if not CONF.os_inherit.enabled:
+ if inherited:
+ return []
+ inherited = False
+
+ if effective:
+ return self._list_effective_role_assignments(
+ role_id, user_id, group_id, domain_id, project_id, inherited)
+ else:
+ return self._list_direct_role_assignments(
+ role_id, user_id, group_id, domain_id, project_id, inherited)
def delete_tokens_for_role_assignments(self, role_id):
assignments = self.list_role_assignments_for_role(role_id=role_id)
@@ -532,98 +909,6 @@ class Manager(manager.Manager):
# from persistence if persistence is enabled.
pass
- @deprecated_to_role_api
- def create_role(self, role_id, role):
- return self.role_api.create_role(role_id, role)
-
- @deprecated_to_role_api
- def get_role(self, role_id):
- return self.role_api.get_role(role_id)
-
- @deprecated_to_role_api
- def update_role(self, role_id, role):
- return self.role_api.update_role(role_id, role)
-
- @deprecated_to_role_api
- def delete_role(self, role_id):
- return self.role_api.delete_role(role_id)
-
- @deprecated_to_role_api
- def list_roles(self, hints=None):
- return self.role_api.list_roles(hints=hints)
-
- @deprecated_to_resource_api
- def create_project(self, project_id, project):
- return self.resource_api.create_project(project_id, project)
-
- @deprecated_to_resource_api
- def get_project_by_name(self, tenant_name, domain_id):
- return self.resource_api.get_project_by_name(tenant_name, domain_id)
-
- @deprecated_to_resource_api
- def get_project(self, project_id):
- return self.resource_api.get_project(project_id)
-
- @deprecated_to_resource_api
- def update_project(self, project_id, project):
- return self.resource_api.update_project(project_id, project)
-
- @deprecated_to_resource_api
- def delete_project(self, project_id):
- return self.resource_api.delete_project(project_id)
-
- @deprecated_to_resource_api
- def list_projects(self, hints=None):
- return self.resource_api.list_projects(hints=hints)
-
- @deprecated_to_resource_api
- def list_projects_in_domain(self, domain_id):
- return self.resource_api.list_projects_in_domain(domain_id)
-
- @deprecated_to_resource_api
- def create_domain(self, domain_id, domain):
- return self.resource_api.create_domain(domain_id, domain)
-
- @deprecated_to_resource_api
- def get_domain_by_name(self, domain_name):
- return self.resource_api.get_domain_by_name(domain_name)
-
- @deprecated_to_resource_api
- def get_domain(self, domain_id):
- return self.resource_api.get_domain(domain_id)
-
- @deprecated_to_resource_api
- def update_domain(self, domain_id, domain):
- return self.resource_api.update_domain(domain_id, domain)
-
- @deprecated_to_resource_api
- def delete_domain(self, domain_id):
- return self.resource_api.delete_domain(domain_id)
-
- @deprecated_to_resource_api
- def list_domains(self, hints=None):
- return self.resource_api.list_domains(hints=hints)
-
- @deprecated_to_resource_api
- def assert_domain_enabled(self, domain_id, domain=None):
- return self.resource_api.assert_domain_enabled(domain_id, domain)
-
- @deprecated_to_resource_api
- def assert_project_enabled(self, project_id, project=None):
- return self.resource_api.assert_project_enabled(project_id, project)
-
- @deprecated_to_resource_api
- def is_leaf_project(self, project_id):
- return self.resource_api.is_leaf_project(project_id)
-
- @deprecated_to_resource_api
- def list_project_parents(self, project_id, user_id=None):
- return self.resource_api.list_project_parents(project_id, user_id)
-
- @deprecated_to_resource_api
- def list_projects_in_subtree(self, project_id, user_id=None):
- return self.resource_api.list_projects_in_subtree(project_id, user_id)
-
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
@@ -642,26 +927,6 @@ class Driver(object):
role_list.append(d['id'])
return role_list
- def _add_role_to_role_dicts(self, role_id, inherited, dict_list,
- allow_existing=True):
- # There is a difference in error semantics when trying to
- # assign a role that already exists between the coded v2 and v3
- # API calls. v2 will error if the assignment already exists,
- # while v3 is silent. Setting the 'allow_existing' parameter
- # appropriately lets this call be used for both.
- role_set = set([frozenset(r.items()) for r in dict_list])
- key = frozenset(self._role_to_dict(role_id, inherited).items())
- if not allow_existing and key in role_set:
- raise KeyError
- role_set.add(key)
- return [dict(r) for r in role_set]
-
- def _remove_role_from_role_dicts(self, role_id, inherited, dict_list):
- role_set = set([frozenset(r.items()) for r in dict_list])
- role_set.remove(frozenset(self._role_to_dict(role_id,
- inherited).items()))
- return [dict(r) for r in role_set]
-
def _get_list_limit(self):
return CONF.assignment.list_limit or CONF.list_limit
@@ -740,8 +1005,16 @@ class Driver(object):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
- def list_role_assignments(self):
+ def list_role_assignments(self, role_id=None,
+ user_id=None, group_ids=None,
+ domain_id=None, project_ids=None,
+ inherited_to_projects=None):
+ """Returns a list of role assignments for actors on targets.
+
+ Available parameters represent values in which the returned role
+ assignments attributes need to be filtered on.
+ """
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
@@ -866,12 +1139,8 @@ class Driver(object):
raise exception.NotImplemented() # pragma: no cover
- # TODO(henry-nash): Rename the following two methods to match the more
- # meaningfully named ones above.
-
-# TODO(ayoung): determine what else these two functions raise
@abc.abstractmethod
- def delete_user(self, user_id):
+ def delete_user_assignments(self, user_id):
"""Deletes all assignments for a user.
:raises: keystone.exception.RoleNotFound
@@ -880,7 +1149,7 @@ class Driver(object):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
- def delete_group(self, group_id):
+ def delete_group_assignments(self, group_id):
"""Deletes all assignments for a group.
:raises: keystone.exception.RoleNotFound
@@ -894,6 +1163,8 @@ class Driver(object):
class RoleManager(manager.Manager):
"""Default pivot point for the Role backend."""
+ driver_namespace = 'keystone.role'
+
_ROLE = 'role'
def __init__(self):
@@ -902,9 +1173,8 @@ class RoleManager(manager.Manager):
role_driver = CONF.role.driver
if role_driver is None:
- assignment_driver = (
- dependency.get_provider('assignment_api').driver)
- role_driver = assignment_driver.default_role_driver()
+ assignment_manager = dependency.get_provider('assignment_api')
+ role_driver = assignment_manager.default_role_driver()
super(RoleManager, self).__init__(role_driver)
diff --git a/keystone-moon/keystone/auth/controllers.py b/keystone-moon/keystone/auth/controllers.py
index 065f1f01..04124696 100644
--- a/keystone-moon/keystone/auth/controllers.py
+++ b/keystone-moon/keystone/auth/controllers.py
@@ -17,16 +17,18 @@ import sys
from keystoneclient.common import cms
from oslo_config import cfg
from oslo_log import log
+from oslo_log import versionutils
from oslo_serialization import jsonutils
from oslo_utils import importutils
-from oslo_utils import timeutils
import six
+import stevedore
from keystone.common import controller
from keystone.common import dependency
+from keystone.common import utils
from keystone.common import wsgi
from keystone import config
-from keystone.contrib import federation
+from keystone.contrib.federation import constants as federation_constants
from keystone import exception
from keystone.i18n import _, _LI, _LW
from keystone.resource import controllers as resource_controllers
@@ -41,6 +43,27 @@ AUTH_METHODS = {}
AUTH_PLUGINS_LOADED = False
+def load_auth_method(method):
+ plugin_name = CONF.auth.get(method) or 'default'
+ try:
+ namespace = 'keystone.auth.%s' % method
+ driver_manager = stevedore.DriverManager(namespace, plugin_name,
+ invoke_on_load=True)
+ return driver_manager.driver
+ except RuntimeError:
+ LOG.debug('Failed to load the %s driver (%s) using stevedore, will '
+ 'attempt to load using import_object instead.',
+ method, plugin_name)
+
+ @versionutils.deprecated(as_of=versionutils.deprecated.LIBERTY,
+ in_favor_of='entrypoints',
+ what='direct import of driver')
+ def _load_using_import(plugin_name):
+ return importutils.import_object(plugin_name)
+
+ return _load_using_import(plugin_name)
+
+
def load_auth_methods():
global AUTH_PLUGINS_LOADED
@@ -50,28 +73,8 @@ def load_auth_methods():
# config.setup_authentication should be idempotent, call it to ensure we
# have setup all the appropriate configuration options we may need.
config.setup_authentication()
- for plugin in CONF.auth.methods:
- if '.' in plugin:
- # NOTE(morganfainberg): if '.' is in the plugin name, it should be
- # imported rather than used as a plugin identifier.
- plugin_class = plugin
- driver = importutils.import_object(plugin)
- if not hasattr(driver, 'method'):
- raise ValueError(_('Cannot load an auth-plugin by class-name '
- 'without a "method" attribute defined: %s'),
- plugin_class)
-
- LOG.info(_LI('Loading auth-plugins by class-name is deprecated.'))
- plugin_name = driver.method
- else:
- plugin_name = plugin
- plugin_class = CONF.auth.get(plugin)
- driver = importutils.import_object(plugin_class)
- if plugin_name in AUTH_METHODS:
- raise ValueError(_('Auth plugin %(plugin)s is requesting '
- 'previously registered method %(method)s') %
- {'plugin': plugin_class, 'method': driver.method})
- AUTH_METHODS[plugin_name] = driver
+ for plugin in set(CONF.auth.methods):
+ AUTH_METHODS[plugin] = load_auth_method(plugin)
AUTH_PLUGINS_LOADED = True
@@ -121,11 +124,7 @@ class AuthContext(dict):
return super(AuthContext, self).__setitem__(key, val)
-# TODO(blk-u): this class doesn't use identity_api directly, but makes it
-# available for consumers. Consumers should probably not be getting
-# identity_api from this since it's available in global registry, then
-# identity_api should be removed from this list.
-@dependency.requires('identity_api', 'resource_api', 'trust_api')
+@dependency.requires('resource_api', 'trust_api')
class AuthInfo(object):
"""Encapsulation of "auth" request."""
@@ -217,8 +216,6 @@ class AuthInfo(object):
raise exception.ValidationError(attribute='trust_id',
target='trust')
trust = self.trust_api.get_trust(trust_id)
- if not trust:
- raise exception.TrustNotFound(trust_id=trust_id)
return trust
def _validate_and_normalize_scope_data(self):
@@ -415,7 +412,7 @@ class Auth(controller.V3Controller):
return
# Skip scoping when unscoped federated token is being issued
- if federation.IDENTITY_PROVIDER in auth_context:
+ if federation_constants.IDENTITY_PROVIDER in auth_context:
return
# Do not scope if request is for explicitly unscoped token
@@ -546,7 +543,7 @@ class Auth(controller.V3Controller):
for t in tokens:
expires = t['expires']
if not (expires and isinstance(expires, six.text_type)):
- t['expires'] = timeutils.isotime(expires)
+ t['expires'] = utils.isotime(expires)
data = {'revoked': tokens}
json_data = jsonutils.dumps(data)
signed_text = cms.cms_sign_text(json_data,
diff --git a/keystone-moon/keystone/auth/plugins/core.py b/keystone-moon/keystone/auth/plugins/core.py
index 96a5ecf8..bcad27e5 100644
--- a/keystone-moon/keystone/auth/plugins/core.py
+++ b/keystone-moon/keystone/auth/plugins/core.py
@@ -21,6 +21,7 @@ import six
from keystone.common import dependency
from keystone import exception
+
CONF = cfg.CONF
LOG = log.getLogger(__name__)
@@ -51,7 +52,7 @@ def convert_method_list_to_integer(methods):
method_ints = []
for method in methods:
- for k, v in six.iteritems(method_map):
+ for k, v in method_map.items():
if v == method:
method_ints.append(k)
return sum(method_ints)
@@ -71,7 +72,7 @@ def convert_integer_to_method_list(method_int):
method_map = construct_method_map_from_config()
method_ints = []
- for k, v in six.iteritems(method_map):
+ for k, v in method_map.items():
method_ints.append(k)
method_ints.sort(reverse=True)
diff --git a/keystone-moon/keystone/auth/plugins/external.py b/keystone-moon/keystone/auth/plugins/external.py
index 2322649f..cabe6282 100644
--- a/keystone-moon/keystone/auth/plugins/external.py
+++ b/keystone-moon/keystone/auth/plugins/external.py
@@ -23,7 +23,6 @@ from keystone import auth
from keystone.common import dependency
from keystone import exception
from keystone.i18n import _
-from keystone.openstack.common import versionutils
CONF = cfg.CONF
@@ -31,9 +30,6 @@ CONF = cfg.CONF
@six.add_metaclass(abc.ABCMeta)
class Base(auth.AuthMethodHandler):
-
- method = 'external'
-
def authenticate(self, context, auth_info, auth_context):
"""Use REMOTE_USER to look up the user in the identity backend.
@@ -96,91 +92,10 @@ class Domain(Base):
return user_ref
-@dependency.requires('assignment_api', 'identity_api')
class KerberosDomain(Domain):
"""Allows `kerberos` as a method."""
- method = 'kerberos'
-
def _authenticate(self, remote_user, context):
auth_type = context['environment'].get('AUTH_TYPE')
if auth_type != 'Negotiate':
raise exception.Unauthorized(_("auth_type is not Negotiate"))
return super(KerberosDomain, self)._authenticate(remote_user, context)
-
-
-class ExternalDefault(DefaultDomain):
- """Deprecated. Please use keystone.auth.external.DefaultDomain instead."""
-
- @versionutils.deprecated(
- as_of=versionutils.deprecated.ICEHOUSE,
- in_favor_of='keystone.auth.external.DefaultDomain',
- remove_in=+1)
- def __init__(self):
- super(ExternalDefault, self).__init__()
-
-
-class ExternalDomain(Domain):
- """Deprecated. Please use keystone.auth.external.Domain instead."""
-
- @versionutils.deprecated(
- as_of=versionutils.deprecated.ICEHOUSE,
- in_favor_of='keystone.auth.external.Domain',
- remove_in=+1)
- def __init__(self):
- super(ExternalDomain, self).__init__()
-
-
-@dependency.requires('identity_api')
-class LegacyDefaultDomain(Base):
- """Deprecated. Please use keystone.auth.external.DefaultDomain instead.
-
- This plugin exists to provide compatibility for the unintended behavior
- described here: https://bugs.launchpad.net/keystone/+bug/1253484
-
- """
-
- @versionutils.deprecated(
- as_of=versionutils.deprecated.ICEHOUSE,
- in_favor_of='keystone.auth.external.DefaultDomain',
- remove_in=+1)
- def __init__(self):
- super(LegacyDefaultDomain, self).__init__()
-
- def _authenticate(self, remote_user, context):
- """Use remote_user to look up the user in the identity backend."""
- # NOTE(dolph): this unintentionally discards half the REMOTE_USER value
- names = remote_user.split('@')
- username = names.pop(0)
- domain_id = CONF.identity.default_domain_id
- user_ref = self.identity_api.get_user_by_name(username, domain_id)
- return user_ref
-
-
-@dependency.requires('identity_api', 'resource_api')
-class LegacyDomain(Base):
- """Deprecated. Please use keystone.auth.external.Domain instead."""
-
- @versionutils.deprecated(
- as_of=versionutils.deprecated.ICEHOUSE,
- in_favor_of='keystone.auth.external.Domain',
- remove_in=+1)
- def __init__(self):
- super(LegacyDomain, self).__init__()
-
- def _authenticate(self, remote_user, context):
- """Use remote_user to look up the user in the identity backend.
-
- If remote_user contains an `@` assume that the substring before the
- rightmost `@` is the username, and the substring after the @ is the
- domain name.
- """
- names = remote_user.rsplit('@', 1)
- username = names.pop(0)
- if names:
- domain_name = names[0]
- domain_ref = self.resource_api.get_domain_by_name(domain_name)
- domain_id = domain_ref['id']
- else:
- domain_id = CONF.identity.default_domain_id
- user_ref = self.identity_api.get_user_by_name(username, domain_id)
- return user_ref
diff --git a/keystone-moon/keystone/auth/plugins/mapped.py b/keystone-moon/keystone/auth/plugins/mapped.py
index abf44481..220ff013 100644
--- a/keystone-moon/keystone/auth/plugins/mapped.py
+++ b/keystone-moon/keystone/auth/plugins/mapped.py
@@ -13,14 +13,13 @@
import functools
from oslo_log import log
-from oslo_serialization import jsonutils
from pycadf import cadftaxonomy as taxonomy
from six.moves.urllib import parse
from keystone import auth
from keystone.auth import plugins as auth_plugins
from keystone.common import dependency
-from keystone.contrib import federation
+from keystone.contrib.federation import constants as federation_constants
from keystone.contrib.federation import utils
from keystone import exception
from keystone.i18n import _
@@ -33,8 +32,8 @@ LOG = log.getLogger(__name__)
METHOD_NAME = 'mapped'
-@dependency.requires('assignment_api', 'federation_api', 'identity_api',
- 'token_provider_api')
+@dependency.requires('federation_api', 'identity_api',
+ 'resource_api', 'token_provider_api')
class Mapped(auth.AuthMethodHandler):
def _get_token_ref(self, auth_payload):
@@ -44,7 +43,7 @@ class Mapped(auth.AuthMethodHandler):
token_data=response)
def authenticate(self, context, auth_payload, auth_context):
- """Authenticate mapped user and return an authentication context.
+ """Authenticate mapped user and set an authentication context.
:param context: keystone's request context
:param auth_payload: the content of the authentication for a
@@ -66,7 +65,7 @@ class Mapped(auth.AuthMethodHandler):
self.token_provider_api)
else:
handle_unscoped_token(context, auth_payload, auth_context,
- self.assignment_api, self.federation_api,
+ self.resource_api, self.federation_api,
self.identity_api)
@@ -101,12 +100,12 @@ def handle_scoped_token(context, auth_payload, auth_context, token_ref,
auth_context['user_id'] = user_id
auth_context['group_ids'] = group_ids
- auth_context[federation.IDENTITY_PROVIDER] = identity_provider
- auth_context[federation.PROTOCOL] = protocol
+ auth_context[federation_constants.IDENTITY_PROVIDER] = identity_provider
+ auth_context[federation_constants.PROTOCOL] = protocol
def handle_unscoped_token(context, auth_payload, auth_context,
- assignment_api, federation_api, identity_api):
+ resource_api, federation_api, identity_api):
def is_ephemeral_user(mapped_properties):
return mapped_properties['user']['type'] == utils.UserType.EPHEMERAL
@@ -115,8 +114,9 @@ def handle_unscoped_token(context, auth_payload, auth_context,
identity_provider, protocol):
auth_context['user_id'] = user['id']
auth_context['group_ids'] = mapped_properties['group_ids']
- auth_context[federation.IDENTITY_PROVIDER] = identity_provider
- auth_context[federation.PROTOCOL] = protocol
+ auth_context[federation_constants.IDENTITY_PROVIDER] = (
+ identity_provider)
+ auth_context[federation_constants.PROTOCOL] = protocol
def build_local_user_context(auth_context, mapped_properties):
user_info = auth_plugins.UserAuthInfo.create(mapped_properties,
@@ -139,17 +139,15 @@ def handle_unscoped_token(context, auth_payload, auth_context,
user_id = None
try:
- mapped_properties = apply_mapping_filter(
- identity_provider, protocol, assertion, assignment_api,
+ mapped_properties, mapping_id = apply_mapping_filter(
+ identity_provider, protocol, assertion, resource_api,
federation_api, identity_api)
if is_ephemeral_user(mapped_properties):
user = setup_username(context, mapped_properties)
user_id = user['id']
group_ids = mapped_properties['group_ids']
- mapping = federation_api.get_mapping_from_idp_and_protocol(
- identity_provider, protocol)
- utils.validate_groups_cardinality(group_ids, mapping['id'])
+ utils.validate_groups_cardinality(group_ids, mapping_id)
build_ephemeral_user_context(auth_context, user,
mapped_properties,
identity_provider, protocol)
@@ -182,32 +180,29 @@ def extract_assertion_data(context):
def apply_mapping_filter(identity_provider, protocol, assertion,
- assignment_api, federation_api, identity_api):
+ resource_api, federation_api, identity_api):
idp = federation_api.get_idp(identity_provider)
- utils.validate_idp(idp, assertion)
- mapping = federation_api.get_mapping_from_idp_and_protocol(
- identity_provider, protocol)
- rules = jsonutils.loads(mapping['rules'])
- LOG.debug('using the following rules: %s', rules)
- rule_processor = utils.RuleProcessor(rules)
- mapped_properties = rule_processor.process(assertion)
+ utils.validate_idp(idp, protocol, assertion)
+
+ mapped_properties, mapping_id = federation_api.evaluate(
+ identity_provider, protocol, assertion)
# NOTE(marek-denis): We update group_ids only here to avoid fetching
# groups identified by name/domain twice.
# NOTE(marek-denis): Groups are translated from name/domain to their
# corresponding ids in the auth plugin, as we need information what
- # ``mapping_id`` was used as well as idenity_api and assignment_api
+ # ``mapping_id`` was used as well as idenity_api and resource_api
# objects.
group_ids = mapped_properties['group_ids']
utils.validate_groups_in_backend(group_ids,
- mapping['id'],
+ mapping_id,
identity_api)
group_ids.extend(
utils.transform_to_group_ids(
- mapped_properties['group_names'], mapping['id'],
- identity_api, assignment_api))
+ mapped_properties['group_names'], mapping_id,
+ identity_api, resource_api))
mapped_properties['group_ids'] = list(set(group_ids))
- return mapped_properties
+ return mapped_properties, mapping_id
def setup_username(context, mapped_properties):
@@ -241,12 +236,17 @@ def setup_username(context, mapped_properties):
user_name = user.get('name') or context['environment'].get('REMOTE_USER')
if not any([user_id, user_name]):
- raise exception.Unauthorized(_("Could not map user"))
+ msg = _("Could not map user while setting ephemeral user identity. "
+ "Either mapping rules must specify user id/name or "
+ "REMOTE_USER environment variable must be set.")
+ raise exception.Unauthorized(msg)
elif not user_name:
user['name'] = user_id
elif not user_id:
- user['id'] = parse.quote(user_name)
+ user_id = user_name
+
+ user['id'] = parse.quote(user_id)
return user
diff --git a/keystone-moon/keystone/auth/plugins/oauth1.py b/keystone-moon/keystone/auth/plugins/oauth1.py
index 2f1cc2fa..e081cd62 100644
--- a/keystone-moon/keystone/auth/plugins/oauth1.py
+++ b/keystone-moon/keystone/auth/plugins/oauth1.py
@@ -29,15 +29,9 @@ LOG = log.getLogger(__name__)
@dependency.requires('oauth_api')
class OAuth(auth.AuthMethodHandler):
-
- method = 'oauth1'
-
def authenticate(self, context, auth_info, auth_context):
"""Turn a signed request with an access key into a keystone token."""
- if not self.oauth_api:
- raise exception.Unauthorized(_('%s not supported') % self.method)
-
headers = context['headers']
oauth_headers = oauth.get_oauth_headers(headers)
access_token_id = oauth_headers.get('oauth_token')
diff --git a/keystone-moon/keystone/auth/plugins/password.py b/keystone-moon/keystone/auth/plugins/password.py
index c5770445..16492a32 100644
--- a/keystone-moon/keystone/auth/plugins/password.py
+++ b/keystone-moon/keystone/auth/plugins/password.py
@@ -20,6 +20,7 @@ from keystone.common import dependency
from keystone import exception
from keystone.i18n import _
+
METHOD_NAME = 'password'
LOG = log.getLogger(__name__)
@@ -28,11 +29,9 @@ LOG = log.getLogger(__name__)
@dependency.requires('identity_api')
class Password(auth.AuthMethodHandler):
- method = METHOD_NAME
-
def authenticate(self, context, auth_payload, auth_context):
"""Try to authenticate against the identity backend."""
- user_info = auth_plugins.UserAuthInfo.create(auth_payload, self.method)
+ user_info = auth_plugins.UserAuthInfo.create(auth_payload, METHOD_NAME)
# FIXME(gyee): identity.authenticate() can use some refactoring since
# all we care is password matches
diff --git a/keystone-moon/keystone/auth/plugins/saml2.py b/keystone-moon/keystone/auth/plugins/saml2.py
index 744f26a9..cf7a8a50 100644
--- a/keystone-moon/keystone/auth/plugins/saml2.py
+++ b/keystone-moon/keystone/auth/plugins/saml2.py
@@ -23,5 +23,4 @@ This plugin subclasses mapped.Mapped, and may be specified in keystone.conf:
class Saml2(mapped.Mapped):
-
- method = 'saml2'
+ pass
diff --git a/keystone-moon/keystone/auth/plugins/token.py b/keystone-moon/keystone/auth/plugins/token.py
index 5ca0b257..069f1140 100644
--- a/keystone-moon/keystone/auth/plugins/token.py
+++ b/keystone-moon/keystone/auth/plugins/token.py
@@ -33,8 +33,6 @@ CONF = cfg.CONF
@dependency.requires('federation_api', 'identity_api', 'token_provider_api')
class Token(auth.AuthMethodHandler):
- method = 'token'
-
def _get_token_ref(self, auth_payload):
token_id = auth_payload['id']
response = self.token_provider_api.validate_token(token_id)
@@ -44,7 +42,7 @@ class Token(auth.AuthMethodHandler):
def authenticate(self, context, auth_payload, user_context):
if 'id' not in auth_payload:
raise exception.ValidationError(attribute='id',
- target=self.method)
+ target='token')
token_ref = self._get_token_ref(auth_payload)
if token_ref.is_federated_user and self.federation_api:
mapped.handle_scoped_token(
diff --git a/keystone-moon/keystone/catalog/backends/sql.py b/keystone-moon/keystone/catalog/backends/sql.py
index 8ab82305..0db6d498 100644
--- a/keystone-moon/keystone/catalog/backends/sql.py
+++ b/keystone-moon/keystone/catalog/backends/sql.py
@@ -16,7 +16,6 @@
import itertools
from oslo_config import cfg
-import six
import sqlalchemy
from sqlalchemy.sql import true
@@ -269,10 +268,28 @@ class Catalog(catalog.Driver):
return ref.to_dict()
def get_catalog(self, user_id, tenant_id):
+ """Retrieve and format the V2 service catalog.
+
+ :param user_id: The id of the user who has been authenticated for
+ creating service catalog.
+ :param tenant_id: The id of the project. 'tenant_id' will be None
+ in the case this being called to create a catalog to go in a
+ domain scoped token. In this case, any endpoint that requires
+ a tenant_id as part of their URL will be skipped (as would a whole
+ service if, as a consequence, it has no valid endpoints).
+
+ :returns: A nested dict representing the service catalog or an
+ empty dict.
+
+ """
substitutions = dict(
- itertools.chain(six.iteritems(CONF),
- six.iteritems(CONF.eventlet_server)))
- substitutions.update({'tenant_id': tenant_id, 'user_id': user_id})
+ itertools.chain(CONF.items(), CONF.eventlet_server.items()))
+ substitutions.update({'user_id': user_id})
+ silent_keyerror_failures = []
+ if tenant_id:
+ substitutions.update({'tenant_id': tenant_id})
+ else:
+ silent_keyerror_failures = ['tenant_id']
session = sql.get_session()
endpoints = (session.query(Endpoint).
@@ -285,7 +302,13 @@ class Catalog(catalog.Driver):
if not endpoint.service['enabled']:
continue
try:
- url = core.format_url(endpoint['url'], substitutions)
+ formatted_url = core.format_url(
+ endpoint['url'], substitutions,
+ silent_keyerror_failures=silent_keyerror_failures)
+ if formatted_url is not None:
+ url = formatted_url
+ else:
+ continue
except exception.MalformedEndpoint:
continue # this failure is already logged in format_url()
@@ -304,11 +327,26 @@ class Catalog(catalog.Driver):
return catalog
def get_v3_catalog(self, user_id, tenant_id):
+ """Retrieve and format the current V3 service catalog.
+
+ :param user_id: The id of the user who has been authenticated for
+ creating service catalog.
+ :param tenant_id: The id of the project. 'tenant_id' will be None in
+ the case this being called to create a catalog to go in a domain
+ scoped token. In this case, any endpoint that requires a
+ tenant_id as part of their URL will be skipped.
+
+ :returns: A list representing the service catalog or an empty list
+
+ """
d = dict(
- itertools.chain(six.iteritems(CONF),
- six.iteritems(CONF.eventlet_server)))
- d.update({'tenant_id': tenant_id,
- 'user_id': user_id})
+ itertools.chain(CONF.items(), CONF.eventlet_server.items()))
+ d.update({'user_id': user_id})
+ silent_keyerror_failures = []
+ if tenant_id:
+ d.update({'tenant_id': tenant_id})
+ else:
+ silent_keyerror_failures = ['tenant_id']
session = sql.get_session()
services = (session.query(Service).filter(Service.enabled == true()).
@@ -322,12 +360,20 @@ class Catalog(catalog.Driver):
del endpoint['enabled']
endpoint['region'] = endpoint['region_id']
try:
- endpoint['url'] = core.format_url(endpoint['url'], d)
+ formatted_url = core.format_url(
+ endpoint['url'], d,
+ silent_keyerror_failures=silent_keyerror_failures)
+ if formatted_url:
+ endpoint['url'] = formatted_url
+ else:
+ continue
except exception.MalformedEndpoint:
continue # this failure is already logged in format_url()
yield endpoint
+ # TODO(davechen): If there is service with no endpoints, we should skip
+ # the service instead of keeping it in the catalog, see bug #1436704.
def make_v3_service(svc):
eps = list(make_v3_endpoints(svc.endpoints))
service = {'endpoints': eps, 'id': svc.id, 'type': svc.type}
diff --git a/keystone-moon/keystone/catalog/backends/templated.py b/keystone-moon/keystone/catalog/backends/templated.py
index d3ee105d..31d8b9e0 100644
--- a/keystone-moon/keystone/catalog/backends/templated.py
+++ b/keystone-moon/keystone/catalog/backends/templated.py
@@ -17,7 +17,6 @@ import os.path
from oslo_config import cfg
from oslo_log import log
-import six
from keystone.catalog.backends import kvs
from keystone.catalog import core
@@ -107,19 +106,43 @@ class Catalog(kvs.Catalog):
raise
def get_catalog(self, user_id, tenant_id):
+ """Retrieve and format the V2 service catalog.
+
+ :param user_id: The id of the user who has been authenticated for
+ creating service catalog.
+ :param tenant_id: The id of the project. 'tenant_id' will be None in
+ the case this being called to create a catalog to go in a domain
+ scoped token. In this case, any endpoint that requires a tenant_id
+ as part of their URL will be skipped.
+
+ :returns: A nested dict representing the service catalog or an
+ empty dict.
+
+ """
substitutions = dict(
- itertools.chain(six.iteritems(CONF),
- six.iteritems(CONF.eventlet_server)))
- substitutions.update({'tenant_id': tenant_id, 'user_id': user_id})
+ itertools.chain(CONF.items(), CONF.eventlet_server.items()))
+ substitutions.update({'user_id': user_id})
+ silent_keyerror_failures = []
+ if tenant_id:
+ substitutions.update({'tenant_id': tenant_id})
+ else:
+ silent_keyerror_failures = ['tenant_id']
catalog = {}
- for region, region_ref in six.iteritems(self.templates):
+ # TODO(davechen): If there is service with no endpoints, we should
+ # skip the service instead of keeping it in the catalog.
+ # see bug #1436704.
+ for region, region_ref in self.templates.items():
catalog[region] = {}
- for service, service_ref in six.iteritems(region_ref):
+ for service, service_ref in region_ref.items():
service_data = {}
try:
- for k, v in six.iteritems(service_ref):
- service_data[k] = core.format_url(v, substitutions)
+ for k, v in service_ref.items():
+ formatted_value = core.format_url(
+ v, substitutions,
+ silent_keyerror_failures=silent_keyerror_failures)
+ if formatted_value:
+ service_data[k] = formatted_value
except exception.MalformedEndpoint:
continue # this failure is already logged in format_url()
catalog[region][service] = service_data
diff --git a/keystone-moon/keystone/catalog/controllers.py b/keystone-moon/keystone/catalog/controllers.py
index 3518c4bf..92046e8a 100644
--- a/keystone-moon/keystone/catalog/controllers.py
+++ b/keystone-moon/keystone/catalog/controllers.py
@@ -15,8 +15,7 @@
import uuid
-import six
-
+from keystone.catalog import core
from keystone.catalog import schema
from keystone.common import controller
from keystone.common import dependency
@@ -88,7 +87,7 @@ class Endpoint(controller.V2Controller):
# add the legacy endpoint with an interface url
legacy_ep['%surl' % endpoint['interface']] = endpoint['url']
- return {'endpoints': legacy_endpoints.values()}
+ return {'endpoints': list(legacy_endpoints.values())}
@controller.v2_deprecated
def create_endpoint(self, context, endpoint):
@@ -100,6 +99,14 @@ class Endpoint(controller.V2Controller):
# service_id is necessary
self._require_attribute(endpoint, 'service_id')
+ # we should check publicurl, adminurl, internalurl
+ # if invalid, we should raise an exception to reject
+ # the request
+ for interface in INTERFACES:
+ interface_url = endpoint.get(interface + 'url')
+ if interface_url:
+ core.check_endpoint_url(interface_url)
+
initiator = notifications._get_request_audit_info(context)
if endpoint.get('region') is not None:
@@ -124,7 +131,7 @@ class Endpoint(controller.V2Controller):
legacy_endpoint_ref.pop(url)
legacy_endpoint_id = uuid.uuid4().hex
- for interface, url in six.iteritems(urls):
+ for interface, url in urls.items():
endpoint_ref = endpoint.copy()
endpoint_ref['id'] = uuid.uuid4().hex
endpoint_ref['legacy_endpoint_id'] = legacy_endpoint_id
@@ -301,13 +308,14 @@ class EndpointV3(controller.V3Controller):
@controller.protected()
@validation.validated(schema.endpoint_create, 'endpoint')
def create_endpoint(self, context, endpoint):
+ core.check_endpoint_url(endpoint['url'])
ref = self._assign_unique_id(self._normalize_dict(endpoint))
ref = self._validate_endpoint_region(ref, context)
initiator = notifications._get_request_audit_info(context)
ref = self.catalog_api.create_endpoint(ref['id'], ref, initiator)
return EndpointV3.wrap_member(context, ref)
- @controller.filterprotected('interface', 'service_id')
+ @controller.filterprotected('interface', 'service_id', 'region_id')
def list_endpoints(self, context, filters):
hints = EndpointV3.build_driver_hints(context, filters)
refs = self.catalog_api.list_endpoints(hints=hints)
diff --git a/keystone-moon/keystone/catalog/core.py b/keystone-moon/keystone/catalog/core.py
index fba26b89..6883b024 100644
--- a/keystone-moon/keystone/catalog/core.py
+++ b/keystone-moon/keystone/catalog/core.py
@@ -16,6 +16,7 @@
"""Main entry point into the Catalog service."""
import abc
+import itertools
from oslo_config import cfg
from oslo_log import log
@@ -35,25 +36,27 @@ from keystone import notifications
CONF = cfg.CONF
LOG = log.getLogger(__name__)
MEMOIZE = cache.get_memoization_decorator(section='catalog')
+WHITELISTED_PROPERTIES = [
+ 'tenant_id', 'user_id', 'public_bind_host', 'admin_bind_host',
+ 'compute_host', 'admin_port', 'public_port',
+ 'public_endpoint', 'admin_endpoint', ]
-def format_url(url, substitutions):
+def format_url(url, substitutions, silent_keyerror_failures=None):
"""Formats a user-defined URL with the given substitutions.
:param string url: the URL to be formatted
:param dict substitutions: the dictionary used for substitution
+ :param list silent_keyerror_failures: keys for which we should be silent
+ if there is a KeyError exception on substitution attempt
:returns: a formatted URL
"""
- WHITELISTED_PROPERTIES = [
- 'tenant_id', 'user_id', 'public_bind_host', 'admin_bind_host',
- 'compute_host', 'compute_port', 'admin_port', 'public_port',
- 'public_endpoint', 'admin_endpoint', ]
-
substitutions = utils.WhiteListedItemFilter(
WHITELISTED_PROPERTIES,
substitutions)
+ allow_keyerror = silent_keyerror_failures or []
try:
result = url.replace('$(', '%(') % substitutions
except AttributeError:
@@ -61,10 +64,14 @@ def format_url(url, substitutions):
{"url": url})
raise exception.MalformedEndpoint(endpoint=url)
except KeyError as e:
- LOG.error(_LE("Malformed endpoint %(url)s - unknown key %(keyerror)s"),
- {"url": url,
- "keyerror": e})
- raise exception.MalformedEndpoint(endpoint=url)
+ if not e.args or e.args[0] not in allow_keyerror:
+ LOG.error(_LE("Malformed endpoint %(url)s - unknown key "
+ "%(keyerror)s"),
+ {"url": url,
+ "keyerror": e})
+ raise exception.MalformedEndpoint(endpoint=url)
+ else:
+ result = None
except TypeError as e:
LOG.error(_LE("Malformed endpoint '%(url)s'. The following type error "
"occurred during string substitution: %(typeerror)s"),
@@ -78,6 +85,28 @@ def format_url(url, substitutions):
return result
+def check_endpoint_url(url):
+ """Check substitution of url.
+
+ The invalid urls are as follows:
+ urls with substitutions that is not in the whitelist
+
+ Check the substitutions in the URL to make sure they are valid
+ and on the whitelist.
+
+ :param str url: the URL to validate
+ :rtype: None
+ :raises keystone.exception.URLValidationError: if the URL is invalid
+ """
+ # check whether the property in the path is exactly the same
+ # with that in the whitelist below
+ substitutions = dict(zip(WHITELISTED_PROPERTIES, itertools.repeat('')))
+ try:
+ url.replace('$(', '%(') % substitutions
+ except (KeyError, TypeError, ValueError):
+ raise exception.URLValidationError(url)
+
+
@dependency.provider('catalog_api')
class Manager(manager.Manager):
"""Default pivot point for the Catalog backend.
@@ -86,6 +115,9 @@ class Manager(manager.Manager):
dynamically calls the backend.
"""
+
+ driver_namespace = 'keystone.catalog'
+
_ENDPOINT = 'endpoint'
_SERVICE = 'service'
_REGION = 'region'
@@ -103,10 +135,12 @@ class Manager(manager.Manager):
msg = _('Duplicate ID, %s.') % region_ref['id']
raise exception.Conflict(type='region', details=msg)
- # NOTE(lbragstad): The description column of the region database
- # can not be null. So if the user doesn't pass in a description then
- # set it to an empty string.
- region_ref.setdefault('description', '')
+ # NOTE(lbragstad,dstanek): The description column of the region
+ # database cannot be null. So if the user doesn't pass in a
+ # description or passes in a null description then set it to an
+ # empty string.
+ if region_ref.get('description') is None:
+ region_ref['description'] = ''
try:
ret = self.driver.create_region(region_ref)
except exception.NotFound:
@@ -124,6 +158,11 @@ class Manager(manager.Manager):
raise exception.RegionNotFound(region_id=region_id)
def update_region(self, region_id, region_ref, initiator=None):
+ # NOTE(lbragstad,dstanek): The description column of the region
+ # database cannot be null. So if the user passes in a null
+ # description set it to an empty string.
+ if 'description' in region_ref and region_ref['description'] is None:
+ region_ref['description'] = ''
ref = self.driver.update_region(region_id, region_ref)
notifications.Audit.updated(self._REGION, region_id, initiator)
self.get_region.invalidate(self, region_id)
@@ -475,14 +514,14 @@ class Driver(object):
v2_catalog = self.get_catalog(user_id, tenant_id)
v3_catalog = []
- for region_name, region in six.iteritems(v2_catalog):
- for service_type, service in six.iteritems(region):
+ for region_name, region in v2_catalog.items():
+ for service_type, service in region.items():
service_v3 = {
'type': service_type,
'endpoints': []
}
- for attr, value in six.iteritems(service):
+ for attr, value in service.items():
# Attributes that end in URL are interfaces. In the V2
# catalog, these are internalURL, publicURL, and adminURL.
# For example, <region_name>.publicURL=<URL> in the V2
diff --git a/keystone-moon/keystone/catalog/schema.py b/keystone-moon/keystone/catalog/schema.py
index a779ad02..671f1233 100644
--- a/keystone-moon/keystone/catalog/schema.py
+++ b/keystone-moon/keystone/catalog/schema.py
@@ -14,7 +14,9 @@ from keystone.common.validation import parameter_types
_region_properties = {
- 'description': parameter_types.description,
+ 'description': {
+ 'type': ['string', 'null'],
+ },
# NOTE(lbragstad): Regions use ID differently. The user can specify the ID
# or it will be generated automatically.
'id': {
diff --git a/keystone-moon/keystone/cmd/__init__.py b/keystone-moon/keystone/cmd/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/cmd/__init__.py
diff --git a/keystone-moon/keystone/cmd/all.py b/keystone-moon/keystone/cmd/all.py
new file mode 100644
index 00000000..c583accd
--- /dev/null
+++ b/keystone-moon/keystone/cmd/all.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import sys
+
+
+# If ../../keystone/__init__.py exists, add ../../ to Python search path, so
+# that it will override what happens to be installed in
+# /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__),
+ os.pardir,
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir,
+ 'keystone',
+ '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+
+from keystone.server import eventlet as eventlet_server
+
+
+# entry point.
+def main():
+ eventlet_server.run(possible_topdir)
diff --git a/keystone-moon/keystone/cmd/cli.py b/keystone-moon/keystone/cmd/cli.py
new file mode 100644
index 00000000..d993d71c
--- /dev/null
+++ b/keystone-moon/keystone/cmd/cli.py
@@ -0,0 +1,685 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+import os
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+import pbr.version
+
+from keystone.common import driver_hints
+from keystone.common import openssl
+from keystone.common import sql
+from keystone.common.sql import migration_helpers
+from keystone.common import utils
+from keystone import config
+from keystone import exception
+from keystone.i18n import _, _LW
+from keystone.server import backends
+from keystone import token
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+class BaseApp(object):
+
+ name = None
+
+ @classmethod
+ def add_argument_parser(cls, subparsers):
+ parser = subparsers.add_parser(cls.name, help=cls.__doc__)
+ parser.set_defaults(cmd_class=cls)
+ return parser
+
+
+class DbSync(BaseApp):
+ """Sync the database."""
+
+ name = 'db_sync'
+
+ @classmethod
+ def add_argument_parser(cls, subparsers):
+ parser = super(DbSync, cls).add_argument_parser(subparsers)
+ parser.add_argument('version', default=None, nargs='?',
+ help=('Migrate the database up to a specified '
+ 'version. If not provided, db_sync will '
+ 'migrate the database to the latest known '
+ 'version. Schema downgrades are not '
+ 'supported.'))
+ parser.add_argument('--extension', default=None,
+ help=('Migrate the database for the specified '
+ 'extension. If not provided, db_sync will '
+ 'migrate the common repository.'))
+
+ return parser
+
+ @staticmethod
+ def main():
+ version = CONF.command.version
+ extension = CONF.command.extension
+ migration_helpers.sync_database_to_version(extension, version)
+
+
+class DbVersion(BaseApp):
+ """Print the current migration version of the database."""
+
+ name = 'db_version'
+
+ @classmethod
+ def add_argument_parser(cls, subparsers):
+ parser = super(DbVersion, cls).add_argument_parser(subparsers)
+ parser.add_argument('--extension', default=None,
+ help=('Print the migration version of the '
+ 'database for the specified extension. If '
+ 'not provided, print it for the common '
+ 'repository.'))
+
+ @staticmethod
+ def main():
+ extension = CONF.command.extension
+ migration_helpers.print_db_version(extension)
+
+
+class BasePermissionsSetup(BaseApp):
+ """Common user/group setup for file permissions."""
+
+ @classmethod
+ def add_argument_parser(cls, subparsers):
+ parser = super(BasePermissionsSetup,
+ cls).add_argument_parser(subparsers)
+ running_as_root = (os.geteuid() == 0)
+ parser.add_argument('--keystone-user', required=running_as_root)
+ parser.add_argument('--keystone-group', required=running_as_root)
+ return parser
+
+ @staticmethod
+ def get_user_group():
+ keystone_user_id = None
+ keystone_group_id = None
+
+ try:
+ a = CONF.command.keystone_user
+ if a:
+ keystone_user_id = utils.get_unix_user(a)[0]
+ except KeyError:
+ raise ValueError("Unknown user '%s' in --keystone-user" % a)
+
+ try:
+ a = CONF.command.keystone_group
+ if a:
+ keystone_group_id = utils.get_unix_group(a)[0]
+ except KeyError:
+ raise ValueError("Unknown group '%s' in --keystone-group" % a)
+
+ return keystone_user_id, keystone_group_id
+
+
+class BaseCertificateSetup(BasePermissionsSetup):
+ """Provides common options for certificate setup."""
+
+ @classmethod
+ def add_argument_parser(cls, subparsers):
+ parser = super(BaseCertificateSetup,
+ cls).add_argument_parser(subparsers)
+ parser.add_argument('--rebuild', default=False, action='store_true',
+ help=('Rebuild certificate files: erase previous '
+ 'files and regenerate them.'))
+ return parser
+
+
+class PKISetup(BaseCertificateSetup):
+ """Set up Key pairs and certificates for token signing and verification.
+
+ This is NOT intended for production use, see Keystone Configuration
+ documentation for details.
+ """
+
+ name = 'pki_setup'
+
+ @classmethod
+ def main(cls):
+ LOG.warn(_LW('keystone-manage pki_setup is not recommended for '
+ 'production use.'))
+ keystone_user_id, keystone_group_id = cls.get_user_group()
+ conf_pki = openssl.ConfigurePKI(keystone_user_id, keystone_group_id,
+ rebuild=CONF.command.rebuild)
+ conf_pki.run()
+
+
+class SSLSetup(BaseCertificateSetup):
+ """Create key pairs and certificates for HTTPS connections.
+
+ This is NOT intended for production use, see Keystone Configuration
+ documentation for details.
+ """
+
+ name = 'ssl_setup'
+
+ @classmethod
+ def main(cls):
+ LOG.warn(_LW('keystone-manage ssl_setup is not recommended for '
+ 'production use.'))
+ keystone_user_id, keystone_group_id = cls.get_user_group()
+ conf_ssl = openssl.ConfigureSSL(keystone_user_id, keystone_group_id,
+ rebuild=CONF.command.rebuild)
+ conf_ssl.run()
+
+
+class FernetSetup(BasePermissionsSetup):
+ """Setup a key repository for Fernet tokens.
+
+ This also creates a primary key used for both creating and validating
+ Fernet tokens. To improve security, you should rotate your keys (using
+ keystone-manage fernet_rotate, for example).
+
+ """
+
+ name = 'fernet_setup'
+
+ @classmethod
+ def main(cls):
+ from keystone.token.providers.fernet import utils as fernet
+
+ keystone_user_id, keystone_group_id = cls.get_user_group()
+ fernet.create_key_directory(keystone_user_id, keystone_group_id)
+ if fernet.validate_key_repository():
+ fernet.initialize_key_repository(
+ keystone_user_id, keystone_group_id)
+
+
+class FernetRotate(BasePermissionsSetup):
+ """Rotate Fernet encryption keys.
+
+ This assumes you have already run keystone-manage fernet_setup.
+
+ A new primary key is placed into rotation, which is used for new tokens.
+ The old primary key is demoted to secondary, which can then still be used
+ for validating tokens. Excess secondary keys (beyond [fernet_tokens]
+ max_active_keys) are revoked. Revoked keys are permanently deleted. A new
+ staged key will be created and used to validate tokens. The next time key
+ rotation takes place, the staged key will be put into rotation as the
+ primary key.
+
+ Rotating keys too frequently, or with [fernet_tokens] max_active_keys set
+ too low, will cause tokens to become invalid prior to their expiration.
+
+ """
+
+ name = 'fernet_rotate'
+
+ @classmethod
+ def main(cls):
+ from keystone.token.providers.fernet import utils as fernet
+
+ keystone_user_id, keystone_group_id = cls.get_user_group()
+ if fernet.validate_key_repository():
+ fernet.rotate_keys(keystone_user_id, keystone_group_id)
+
+
+class TokenFlush(BaseApp):
+ """Flush expired tokens from the backend."""
+
+ name = 'token_flush'
+
+ @classmethod
+ def main(cls):
+ token_manager = token.persistence.PersistenceManager()
+ token_manager.flush_expired_tokens()
+
+
+class MappingPurge(BaseApp):
+ """Purge the mapping table."""
+
+ name = 'mapping_purge'
+
+ @classmethod
+ def add_argument_parser(cls, subparsers):
+ parser = super(MappingPurge, cls).add_argument_parser(subparsers)
+ parser.add_argument('--all', default=False, action='store_true',
+ help=('Purge all mappings.'))
+ parser.add_argument('--domain-name', default=None,
+ help=('Purge any mappings for the domain '
+ 'specified.'))
+ parser.add_argument('--public-id', default=None,
+ help=('Purge the mapping for the Public ID '
+ 'specified.'))
+ parser.add_argument('--local-id', default=None,
+ help=('Purge the mappings for the Local ID '
+ 'specified.'))
+ parser.add_argument('--type', default=None, choices=['user', 'group'],
+ help=('Purge any mappings for the type '
+ 'specified.'))
+ return parser
+
+ @staticmethod
+ def main():
+ def validate_options():
+ # NOTE(henry-nash); It would be nice to use the argparse automated
+ # checking for this validation, but the only way I can see doing
+ # that is to make the default (i.e. if no optional parameters
+ # are specified) to purge all mappings - and that sounds too
+ # dangerous as a default. So we use it in a slightly
+ # unconventional way, where all parameters are optional, but you
+ # must specify at least one.
+ if (CONF.command.all is False and
+ CONF.command.domain_name is None and
+ CONF.command.public_id is None and
+ CONF.command.local_id is None and
+ CONF.command.type is None):
+ raise ValueError(_('At least one option must be provided'))
+
+ if (CONF.command.all is True and
+ (CONF.command.domain_name is not None or
+ CONF.command.public_id is not None or
+ CONF.command.local_id is not None or
+ CONF.command.type is not None)):
+ raise ValueError(_('--all option cannot be mixed with '
+ 'other options'))
+
+ def get_domain_id(name):
+ try:
+ return resource_manager.get_domain_by_name(name)['id']
+ except KeyError:
+ raise ValueError(_("Unknown domain '%(name)s' specified by "
+ "--domain-name") % {'name': name})
+
+ validate_options()
+ drivers = backends.load_backends()
+ resource_manager = drivers['resource_api']
+ mapping_manager = drivers['id_mapping_api']
+
+ # Now that we have validated the options, we know that at least one
+ # option has been specified, and if it was the --all option then this
+ # was the only option specified.
+ #
+ # The mapping dict is used to filter which mappings are purged, so
+ # leaving it empty means purge them all
+ mapping = {}
+ if CONF.command.domain_name is not None:
+ mapping['domain_id'] = get_domain_id(CONF.command.domain_name)
+ if CONF.command.public_id is not None:
+ mapping['public_id'] = CONF.command.public_id
+ if CONF.command.local_id is not None:
+ mapping['local_id'] = CONF.command.local_id
+ if CONF.command.type is not None:
+ mapping['type'] = CONF.command.type
+
+ mapping_manager.purge_mappings(mapping)
+
+
+DOMAIN_CONF_FHEAD = 'keystone.'
+DOMAIN_CONF_FTAIL = '.conf'
+
+
+class DomainConfigUploadFiles(object):
+
+ def __init__(self):
+ super(DomainConfigUploadFiles, self).__init__()
+ self.load_backends()
+
+ def load_backends(self):
+ drivers = backends.load_backends()
+ self.resource_manager = drivers['resource_api']
+ self.domain_config_manager = drivers['domain_config_api']
+
+ def valid_options(self):
+ """Validate the options, returning True if they are indeed valid.
+
+ It would be nice to use the argparse automated checking for this
+ validation, but the only way I can see doing that is to make the
+ default (i.e. if no optional parameters are specified) to upload
+ all configuration files - and that sounds too dangerous as a
+ default. So we use it in a slightly unconventional way, where all
+ parameters are optional, but you must specify at least one.
+
+ """
+ if (CONF.command.all is False and
+ CONF.command.domain_name is None):
+ print(_('At least one option must be provided, use either '
+ '--all or --domain-name'))
+ raise ValueError
+
+ if (CONF.command.all is True and
+ CONF.command.domain_name is not None):
+ print(_('The --all option cannot be used with '
+ 'the --domain-name option'))
+ raise ValueError
+
+ def upload_config_to_database(self, file_name, domain_name):
+ """Upload a single config file to the database.
+
+ :param file_name: the file containing the config options
+ :param domain_name: the domain name
+
+ :raises: ValueError: the domain does not exist or already has domain
+ specific configurations defined
+ :raises: Exceptions from oslo config: there is an issue with options
+ defined in the config file or its
+ format
+
+ The caller of this method should catch the errors raised and handle
+ appropriately in order that the best user experience can be provided for
+ both the case of when a user has asked for a specific config file to
+ be uploaded, as well as all config files in a directory.
+
+ """
+ try:
+ domain_ref = (
+ self.resource_manager.get_domain_by_name(domain_name))
+ except exception.DomainNotFound:
+ print(_('Invalid domain name: %(domain)s found in config file '
+ 'name: %(file)s - ignoring this file.') % {
+ 'domain': domain_name,
+ 'file': file_name})
+ raise ValueError
+
+ if self.domain_config_manager.get_config_with_sensitive_info(
+ domain_ref['id']):
+ print(_('Domain: %(domain)s already has a configuration '
+ 'defined - ignoring file: %(file)s.') % {
+ 'domain': domain_name,
+ 'file': file_name})
+ raise ValueError
+
+ sections = {}
+ try:
+ parser = cfg.ConfigParser(file_name, sections)
+ parser.parse()
+ except Exception:
+ # We explicitly don't try and differentiate the error cases, in
+ # order to keep the code in this tool more robust as oslo.config
+ # changes.
+ print(_('Error parsing configuration file for domain: %(domain)s, '
+ 'file: %(file)s.') % {
+ 'domain': domain_name,
+ 'file': file_name})
+ raise
+
+ for group in sections:
+ for option in sections[group]:
+ sections[group][option] = sections[group][option][0]
+ self.domain_config_manager.create_config(domain_ref['id'], sections)
+
+ def upload_configs_to_database(self, file_name, domain_name):
+ """Upload configs from file and load into database.
+
+ This method will be called repeatedly for all the config files in the
+ config directory. To provide a better UX, we differentiate the error
+ handling in this case (versus when the user has asked for a single
+ config file to be uploaded).
+
+ """
+ try:
+ self.upload_config_to_database(file_name, domain_name)
+ except ValueError:
+ # We've already given all the info we can in a message, so carry
+ # on to the next one
+ pass
+ except Exception:
+ # Some other error occurred relating to this specific config file
+ # or domain. Since we are trying to upload all the config files,
+ # we'll continue and hide this exception. However, we tell the
+ # user how to get more info about this error by re-running with
+ # just the domain at fault. When we run in single-domain mode we
+ # will NOT hide the exception.
+ print(_('To get more detailed information on this error, re-run '
+ 'this command for the specific domain, i.e.: '
+ 'keystone-manage domain_config_upload --domain-name %s') %
+ domain_name)
+ pass
+
+ def read_domain_configs_from_files(self):
+ """Read configs from file(s) and load into database.
+
+ The command line parameters have already been parsed and the CONF
+ command option will have been set. It is either set to the name of an
+ explicit domain, or it's None to indicate that we want all domain
+ config files.
+
+ """
+ domain_name = CONF.command.domain_name
+ conf_dir = CONF.identity.domain_config_dir
+ if not os.path.exists(conf_dir):
+ print(_('Unable to locate domain config directory: %s') % conf_dir)
+ raise ValueError
+
+ if domain_name:
+ # Request is to upload the configs for just one domain
+ fname = DOMAIN_CONF_FHEAD + domain_name + DOMAIN_CONF_FTAIL
+ self.upload_config_to_database(
+ os.path.join(conf_dir, fname), domain_name)
+ return
+
+ # Request is to transfer all config files, so let's read all the
+ # files in the config directory, and transfer those that match the
+ # filename pattern of 'keystone.<domain_name>.conf'
+ for r, d, f in os.walk(conf_dir):
+ for fname in f:
+ if (fname.startswith(DOMAIN_CONF_FHEAD) and
+ fname.endswith(DOMAIN_CONF_FTAIL)):
+ if fname.count('.') >= 2:
+ self.upload_configs_to_database(
+ os.path.join(r, fname),
+ fname[len(DOMAIN_CONF_FHEAD):
+ -len(DOMAIN_CONF_FTAIL)])
+ else:
+ LOG.warn(_LW('Ignoring file (%s) while scanning '
+ 'domain config directory'), fname)
+
+ def run(self):
+ # First off, let's just check we can talk to the domain database
+ try:
+ self.resource_manager.list_domains(driver_hints.Hints())
+ except Exception:
+ # It is likely that there is some SQL or other backend error
+ # related to set up
+ print(_('Unable to access the keystone database, please check it '
+ 'is configured correctly.'))
+ raise
+
+ try:
+ self.valid_options()
+ self.read_domain_configs_from_files()
+ except ValueError:
+ # We will already have printed out a nice message, so indicate
+ # to caller the non-success error code to be used.
+ return 1
+
+
+class DomainConfigUpload(BaseApp):
+ """Upload the domain specific configuration files to the database."""
+
+ name = 'domain_config_upload'
+
+ @classmethod
+ def add_argument_parser(cls, subparsers):
+ parser = super(DomainConfigUpload, cls).add_argument_parser(subparsers)
+ parser.add_argument('--all', default=False, action='store_true',
+ help='Upload contents of all domain specific '
+ 'configuration files. Either use this option '
+ 'or use the --domain-name option to choose a '
+ 'specific domain.')
+ parser.add_argument('--domain-name', default=None,
+ help='Upload contents of the specific '
+ 'configuration file for the given domain. '
+ 'Either use this option or use the --all '
+ 'option to upload contents for all domains.')
+ return parser
+
+ @staticmethod
+ def main():
+ dcu = DomainConfigUploadFiles()
+ status = dcu.run()
+ if status is not None:
+ exit(status)
+
+
+class SamlIdentityProviderMetadata(BaseApp):
+ """Generate Identity Provider metadata."""
+
+ name = 'saml_idp_metadata'
+
+ @staticmethod
+ def main():
+ # NOTE(marek-denis): Since federation is currently an extension import
+ # corresponding modules only when they are really going to be used.
+ from keystone.contrib.federation import idp
+ metadata = idp.MetadataGenerator().generate_metadata()
+ print(metadata.to_string())
+
+
+class MappingEngineTester(BaseApp):
+ """Execute mapping engine locally."""
+
+ name = 'mapping_engine'
+
+ @staticmethod
+ def read_rules(path):
+ try:
+ with open(path) as file:
+ return jsonutils.load(file)
+ except ValueError as e:
+ raise SystemExit(_('Error while parsing rules '
+ '%(path)s: %(err)s') % {'path': path, 'err': e})
+
+ @staticmethod
+ def read_file(path):
+ try:
+ with open(path) as file:
+ return file.read().strip()
+ except IOError as e:
+ raise SystemExit(_("Error while opening file "
+ "%(path)s: %(err)s") % {'path': path, 'err': e})
+
+ @staticmethod
+ def normalize_assertion(assertion):
+ def split(line):
+ try:
+ k, v = line.split(':', 1)
+ return k.strip(), v.strip()
+ except ValueError as e:
+ msg = _("Error while parsing line: '%(line)s': %(err)s")
+ raise SystemExit(msg % {'line': line, 'err': e})
+ assertion = assertion.split('\n')
+ assertion_dict = {}
+ prefix = CONF.command.prefix
+ for line in assertion:
+ k, v = split(line)
+ if prefix:
+ if k.startswith(prefix):
+ assertion_dict[k] = v
+ else:
+ assertion_dict[k] = v
+ return assertion_dict
+
+ @staticmethod
+ def normalize_rules(rules):
+ if isinstance(rules, list):
+ return {'rules': rules}
+ else:
+ return rules
+
+ @classmethod
+ def main(cls):
+ from keystone.contrib.federation import utils as mapping_engine
+ if not CONF.command.engine_debug:
+ mapping_engine.LOG.logger.setLevel('WARN')
+
+ rules = MappingEngineTester.read_rules(CONF.command.rules)
+ rules = MappingEngineTester.normalize_rules(rules)
+ mapping_engine.validate_mapping_structure(rules)
+
+ assertion = MappingEngineTester.read_file(CONF.command.input)
+ assertion = MappingEngineTester.normalize_assertion(assertion)
+ rp = mapping_engine.RuleProcessor(rules['rules'])
+ print(jsonutils.dumps(rp.process(assertion), indent=2))
+
+ @classmethod
+ def add_argument_parser(cls, subparsers):
+ parser = super(MappingEngineTester,
+ cls).add_argument_parser(subparsers)
+
+ parser.add_argument('--rules', default=None, required=True,
+ help=("Path to the file with "
+ "rules to be executed. "
+ "Content must be a proper JSON structure, "
+ "with a top-level key 'rules' and "
+ "corresponding value being a list."))
+ parser.add_argument('--input', default=None, required=True,
+ help=("Path to the file with input attributes. "
+ "The content consists of ':' separated "
+ "parameter names and their values. "
+ "There is only one key-value pair per line. "
+ "A ';' in the value is a separator and then "
+ "a value is treated as a list. Example:\n "
+ "EMAIL: me@example.com\n"
+ "LOGIN: me\n"
+ "GROUPS: group1;group2;group3"))
+ parser.add_argument('--prefix', default=None,
+ help=("A prefix used for each environment "
+ "variable in the assertion. For example, "
+ "all environment variables may have the "
+ "prefix ASDF_."))
+ parser.add_argument('--engine-debug',
+ default=False, action="store_true",
+ help=("Enable debug messages from the mapping "
+ "engine."))
+
+
+CMDS = [
+ DbSync,
+ DbVersion,
+ DomainConfigUpload,
+ FernetRotate,
+ FernetSetup,
+ MappingPurge,
+ MappingEngineTester,
+ PKISetup,
+ SamlIdentityProviderMetadata,
+ SSLSetup,
+ TokenFlush,
+]
+
+
+def add_command_parsers(subparsers):
+ for cmd in CMDS:
+ cmd.add_argument_parser(subparsers)
+
+
+command_opt = cfg.SubCommandOpt('command',
+ title='Commands',
+ help='Available commands',
+ handler=add_command_parsers)
+
+
+def main(argv=None, config_files=None):
+ CONF.register_cli_opt(command_opt)
+
+ config.configure()
+ sql.initialize()
+ config.set_default_for_default_log_levels()
+
+ CONF(args=argv[1:],
+ project='keystone',
+ version=pbr.version.VersionInfo('keystone').version_string(),
+ usage='%(prog)s [' + '|'.join([cmd.name for cmd in CMDS]) + ']',
+ default_config_files=config_files)
+ config.setup_logging()
+ CONF.command.cmd_class.main()
diff --git a/keystone-moon/keystone/cmd/manage.py b/keystone-moon/keystone/cmd/manage.py
new file mode 100644
index 00000000..da38278e
--- /dev/null
+++ b/keystone-moon/keystone/cmd/manage.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import sys
+
+# If ../../keystone/__init__.py exists, add ../../ to Python search path, so
+# that it will override what happens to be installed in
+# /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir,
+ 'keystone',
+ '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+from keystone.cmd import cli
+from keystone.common import environment
+
+
+# entry point.
+def main():
+ environment.use_stdlib()
+
+ dev_conf = os.path.join(possible_topdir,
+ 'etc',
+ 'keystone.conf')
+ config_files = None
+ if os.path.exists(dev_conf):
+ config_files = [dev_conf]
+
+ cli.main(argv=sys.argv, config_files=config_files)
diff --git a/keystone-moon/keystone/common/authorization.py b/keystone-moon/keystone/common/authorization.py
index 5cb1e630..8db618df 100644
--- a/keystone-moon/keystone/common/authorization.py
+++ b/keystone-moon/keystone/common/authorization.py
@@ -59,6 +59,7 @@ def token_to_auth_context(token):
auth_context['project_id'] = token.project_id
elif token.domain_scoped:
auth_context['domain_id'] = token.domain_id
+ auth_context['domain_name'] = token.domain_name
else:
LOG.debug('RBAC: Proceeding without project or domain scope')
diff --git a/keystone-moon/keystone/common/base64utils.py b/keystone-moon/keystone/common/base64utils.py
index 1a636f9b..d19eade7 100644
--- a/keystone-moon/keystone/common/base64utils.py
+++ b/keystone-moon/keystone/common/base64utils.py
@@ -57,8 +57,13 @@ base64url_non_alphabet_re = re.compile(r'[^A-Za-z0-9---_=]+')
_strip_formatting_re = re.compile(r'\s+')
-_base64_to_base64url_trans = string.maketrans('+/', '-_')
-_base64url_to_base64_trans = string.maketrans('-_', '+/')
+if six.PY2:
+ str_ = string
+else:
+ str_ = str
+
+_base64_to_base64url_trans = str_.maketrans('+/', '-_')
+_base64url_to_base64_trans = str_.maketrans('-_', '+/')
def _check_padding_length(pad):
diff --git a/keystone-moon/keystone/common/cache/_memcache_pool.py b/keystone-moon/keystone/common/cache/_memcache_pool.py
index b15332db..2bfcc3bb 100644
--- a/keystone-moon/keystone/common/cache/_memcache_pool.py
+++ b/keystone-moon/keystone/common/cache/_memcache_pool.py
@@ -27,7 +27,7 @@ import time
import memcache
from oslo_log import log
-from six.moves import queue
+from six.moves import queue, zip
from keystone import exception
from keystone.i18n import _
@@ -35,11 +35,22 @@ from keystone.i18n import _
LOG = log.getLogger(__name__)
-# This 'class' is taken from http://stackoverflow.com/a/22520633/238308
-# Don't inherit client from threading.local so that we can reuse clients in
-# different threads
-_MemcacheClient = type('_MemcacheClient', (object,),
- dict(memcache.Client.__dict__))
+
+class _MemcacheClient(memcache.Client):
+ """Thread-global memcache client.
+
+ As the client is inherited from threading.local, we have to restore
+ object methods overloaded by threading.local so we can reuse clients
+ in different threads.
+ """
+ __delattr__ = object.__delattr__
+ __getattribute__ = object.__getattribute__
+ __new__ = object.__new__
+ __setattr__ = object.__setattr__
+
+ def __del__(self):
+ pass
+
_PoolItem = collections.namedtuple('_PoolItem', ['ttl', 'connection'])
diff --git a/keystone-moon/keystone/common/cache/backends/mongo.py b/keystone-moon/keystone/common/cache/backends/mongo.py
index b5de9bc4..cb5ad833 100644
--- a/keystone-moon/keystone/common/cache/backends/mongo.py
+++ b/keystone-moon/keystone/common/cache/backends/mongo.py
@@ -360,8 +360,12 @@ class MongoApi(object):
self._assign_data_mainpulator()
if self.read_preference:
- self.read_preference = pymongo.read_preferences.mongos_enum(
- self.read_preference)
+ # pymongo 3.0 renamed mongos_enum to read_pref_mode_from_name
+ f = getattr(pymongo.read_preferences,
+ 'read_pref_mode_from_name', None)
+ if not f:
+ f = pymongo.read_preferences.mongos_enum
+ self.read_preference = f(self.read_preference)
coll.read_preference = self.read_preference
if self.w > -1:
coll.write_concern['w'] = self.w
@@ -395,7 +399,7 @@ class MongoApi(object):
Refer to MongoDB documentation around TTL index for further details.
"""
indexes = collection.index_information()
- for indx_name, index_data in six.iteritems(indexes):
+ for indx_name, index_data in indexes.items():
if all(k in index_data for k in ('key', 'expireAfterSeconds')):
existing_value = index_data['expireAfterSeconds']
fld_present = 'doc_date' in index_data['key'][0]
@@ -447,7 +451,7 @@ class MongoApi(object):
doc_date = self._get_doc_date()
insert_refs = []
update_refs = []
- existing_docs = self._get_results_as_dict(mapping.keys())
+ existing_docs = self._get_results_as_dict(list(mapping.keys()))
for key, value in mapping.items():
ref = self._get_cache_entry(key, value.payload, value.metadata,
doc_date)
@@ -532,7 +536,7 @@ class BaseTransform(AbstractManipulator):
def transform_incoming(self, son, collection):
"""Used while saving data to MongoDB."""
- for (key, value) in son.items():
+ for (key, value) in list(son.items()):
if isinstance(value, api.CachedValue):
son[key] = value.payload # key is 'value' field here
son['meta'] = value.metadata
@@ -549,7 +553,7 @@ class BaseTransform(AbstractManipulator):
('_id', 'value', 'meta', 'doc_date')):
payload = son.pop('value', None)
metadata = son.pop('meta', None)
- for (key, value) in son.items():
+ for (key, value) in list(son.items()):
if isinstance(value, dict):
son[key] = self.transform_outgoing(value, collection)
if metadata is not None:
diff --git a/keystone-moon/keystone/common/clean.py b/keystone-moon/keystone/common/clean.py
new file mode 100644
index 00000000..38564e0b
--- /dev/null
+++ b/keystone-moon/keystone/common/clean.py
@@ -0,0 +1,87 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import six
+
+from keystone import exception
+from keystone.i18n import _
+
+
+def check_length(property_name, value, min_length=1, max_length=64):
+ if len(value) < min_length:
+ if min_length == 1:
+ msg = _("%s cannot be empty.") % property_name
+ else:
+ msg = (_("%(property_name)s cannot be less than "
+ "%(min_length)s characters.") % dict(
+ property_name=property_name, min_length=min_length))
+ raise exception.ValidationError(msg)
+ if len(value) > max_length:
+ msg = (_("%(property_name)s should not be greater than "
+ "%(max_length)s characters.") % dict(
+ property_name=property_name, max_length=max_length))
+
+ raise exception.ValidationError(msg)
+
+
+def check_type(property_name, value, expected_type, display_expected_type):
+ if not isinstance(value, expected_type):
+ msg = (_("%(property_name)s is not a "
+ "%(display_expected_type)s") % dict(
+ property_name=property_name,
+ display_expected_type=display_expected_type))
+ raise exception.ValidationError(msg)
+
+
+def check_enabled(property_name, enabled):
+ # Allow int and its subclass bool
+ check_type('%s enabled' % property_name, enabled, int, 'boolean')
+ return bool(enabled)
+
+
+def check_name(property_name, name, min_length=1, max_length=64):
+ check_type('%s name' % property_name, name, six.string_types,
+ 'str or unicode')
+ name = name.strip()
+ check_length('%s name' % property_name, name,
+ min_length=min_length, max_length=max_length)
+ return name
+
+
+def domain_name(name):
+ return check_name('Domain', name)
+
+
+def domain_enabled(enabled):
+ return check_enabled('Domain', enabled)
+
+
+def project_name(name):
+ return check_name('Project', name)
+
+
+def project_enabled(enabled):
+ return check_enabled('Project', enabled)
+
+
+def user_name(name):
+ return check_name('User', name, max_length=255)
+
+
+def user_enabled(enabled):
+ return check_enabled('User', enabled)
+
+
+def group_name(name):
+ return check_name('Group', name)
diff --git a/keystone-moon/keystone/common/config.py b/keystone-moon/keystone/common/config.py
index bcaedeef..6cc848b4 100644
--- a/keystone-moon/keystone/common/config.py
+++ b/keystone-moon/keystone/common/config.py
@@ -14,6 +14,7 @@
from oslo_config import cfg
import oslo_messaging
+import passlib.utils
_DEFAULT_AUTH_METHODS = ['external', 'password', 'token', 'oauth1']
@@ -32,14 +33,6 @@ FILE_OPTIONS = {
'AdminTokenAuthMiddleware from your paste '
'application pipelines (for example, in '
'keystone-paste.ini).'),
- cfg.IntOpt('compute_port', default=8774,
- help='(Deprecated) The port which the OpenStack Compute '
- 'service listens on. This option was only used for '
- 'string replacement in the templated catalog backend. '
- 'Templated catalogs should replace the '
- '"$(compute_port)s" substitution with the static port '
- 'of the compute service. As of Juno, this option is '
- 'deprecated and will be removed in the L release.'),
cfg.StrOpt('public_endpoint',
help='The base public endpoint URL for Keystone that is '
'advertised to clients (NOTE: this does NOT affect '
@@ -81,7 +74,13 @@ FILE_OPTIONS = {
help='This is the role name used in combination with the '
'member_role_id option; see that option for more '
'detail.'),
- cfg.IntOpt('crypt_strength', default=40000,
+ # NOTE(lbragstad/morganfainberg): This value of 10k was
+ # measured as having an approximate 30% clock-time savings
+ # over the old default of 40k. The passlib default is not
+ # static and grows over time to constantly approximate ~300ms
+ # of CPU time to hash; this was considered too high. This
+ # value still exceeds the glibc default of 5k.
+ cfg.IntOpt('crypt_strength', default=10000, min=1000, max=100000,
help='The value passed as the keyword "rounds" to '
'passlib\'s encrypt method.'),
cfg.IntOpt('list_limit',
@@ -149,9 +148,10 @@ FILE_OPTIONS = {
'identity configuration files if '
'domain_specific_drivers_enabled is set to true.'),
cfg.StrOpt('driver',
- default=('keystone.identity.backends'
- '.sql.Identity'),
- help='Identity backend driver.'),
+ default='sql',
+ help='Entrypoint for the identity backend driver in the '
+ 'keystone.identity namespace. Supplied drivers are '
+ 'ldap and sql.'),
cfg.BoolOpt('caching', default=True,
help='Toggle for identity caching. This has no '
'effect unless global caching is enabled.'),
@@ -160,6 +160,7 @@ FILE_OPTIONS = {
'no effect unless global and identity caching are '
'enabled.'),
cfg.IntOpt('max_password_length', default=4096,
+ max=passlib.utils.MAX_PASSWORD_SIZE,
help='Maximum supported length for user passwords; '
'decrease to improve performance.'),
cfg.IntOpt('list_limit',
@@ -168,15 +169,16 @@ FILE_OPTIONS = {
],
'identity_mapping': [
cfg.StrOpt('driver',
- default=('keystone.identity.mapping_backends'
- '.sql.Mapping'),
- help='Keystone Identity Mapping backend driver.'),
+ default='sql',
+ help='Entrypoint for the identity mapping backend driver '
+ 'in the keystone.identity.id_mapping namespace.'),
cfg.StrOpt('generator',
- default=('keystone.identity.id_generators'
- '.sha256.Generator'),
- help='Public ID generator for user and group entities. '
- 'The Keystone identity mapper only supports '
- 'generators that produce no more than 64 characters.'),
+ default='sha256',
+ help='Entrypoint for the public ID generator for user and '
+ 'group entities in the keystone.identity.id_generator '
+ 'namespace. The Keystone identity mapper only '
+ 'supports generators that produce no more than 64 '
+ 'characters.'),
cfg.BoolOpt('backward_compatible_ids',
default=True,
help='The format of user and group IDs changed '
@@ -209,8 +211,9 @@ FILE_OPTIONS = {
cfg.IntOpt('max_redelegation_count', default=3,
help='Maximum depth of trust redelegation.'),
cfg.StrOpt('driver',
- default='keystone.trust.backends.sql.Trust',
- help='Trust backend driver.')],
+ default='sql',
+ help='Entrypoint for the trust backend driver in the '
+ 'keystone.trust namespace.')],
'os_inherit': [
cfg.BoolOpt('enabled', default=False,
help='role-assignment inheritance to projects from '
@@ -245,14 +248,17 @@ FILE_OPTIONS = {
help='Amount of time a token should remain valid '
'(in seconds).'),
cfg.StrOpt('provider',
- default='keystone.token.providers.uuid.Provider',
+ default='uuid',
help='Controls the token construction, validation, and '
- 'revocation operations. Core providers are '
- '"keystone.token.providers.[fernet|pkiz|pki|uuid].'
- 'Provider".'),
+ 'revocation operations. Entrypoint in the '
+ 'keystone.token.provider namespace. Core providers '
+ 'are [fernet|pkiz|pki|uuid].'),
cfg.StrOpt('driver',
- default='keystone.token.persistence.backends.sql.Token',
- help='Token persistence backend driver.'),
+ default='sql',
+ help='Entrypoint for the token persistence backend driver '
+ 'in the keystone.token.persistence namespace. '
+ 'Supplied drivers are kvs, memcache, memcache_pool, '
+ 'and sql.'),
cfg.BoolOpt('caching', default=True,
help='Toggle for token system caching. This has no '
'effect unless global caching is enabled.'),
@@ -282,9 +288,10 @@ FILE_OPTIONS = {
],
'revoke': [
cfg.StrOpt('driver',
- default='keystone.contrib.revoke.backends.sql.Revoke',
- help='An implementation of the backend for persisting '
- 'revocation events.'),
+ default='sql',
+ help='Entrypoint for an implementation of the backend for '
+ 'persisting revocation events in the keystone.revoke '
+ 'namespace. Supplied drivers are kvs and sql.'),
cfg.IntOpt('expiration_buffer', default=1800,
help='This value (calculated in seconds) is added to token '
'expiration before a revocation event may be removed '
@@ -326,7 +333,7 @@ FILE_OPTIONS = {
'deployments. Small workloads (single process) '
'like devstack can use the dogpile.cache.memory '
'backend.'),
- cfg.MultiStrOpt('backend_argument', default=[],
+ cfg.MultiStrOpt('backend_argument', default=[], secret=True,
help='Arguments supplied to the backend module. '
'Specify this option once per argument to be '
'passed to the dogpile.cache backend. Example '
@@ -379,7 +386,7 @@ FILE_OPTIONS = {
cfg.StrOpt('ca_key',
default='/etc/keystone/ssl/private/cakey.pem',
help='Path of the CA key file for SSL.'),
- cfg.IntOpt('key_size', default=1024,
+ cfg.IntOpt('key_size', default=1024, min=1024,
help='SSL key length (in bits) (auto generated '
'certificate).'),
cfg.IntOpt('valid_days', default=3650,
@@ -406,7 +413,7 @@ FILE_OPTIONS = {
cfg.StrOpt('ca_key',
default='/etc/keystone/ssl/private/cakey.pem',
help='Path of the CA key for token signing.'),
- cfg.IntOpt('key_size', default=2048,
+ cfg.IntOpt('key_size', default=2048, min=1024,
help='Key size (in bits) for token signing cert '
'(auto generated certificate).'),
cfg.IntOpt('valid_days', default=3650,
@@ -419,17 +426,20 @@ FILE_OPTIONS = {
'token signing.'),
],
'assignment': [
- # assignment has no default for backward compatibility reasons.
- # If assignment driver is not specified, the identity driver chooses
- # the backend
cfg.StrOpt('driver',
- help='Assignment backend driver.'),
+ help='Entrypoint for the assignment backend driver in the '
+ 'keystone.assignment namespace. Supplied drivers are '
+ 'ldap and sql. If an assignment driver is not '
+ 'specified, the identity driver will choose the '
+ 'assignment driver.'),
],
'resource': [
cfg.StrOpt('driver',
- help='Resource backend driver. If a resource driver is '
- 'not specified, the assignment driver will choose '
- 'the resource driver.'),
+ help='Entrypoint for the resource backend driver in the '
+ 'keystone.resource namespace. Supplied drivers are '
+ 'ldap and sql. If a resource driver is not specified, '
+ 'the assignment driver will choose the resource '
+ 'driver.'),
cfg.BoolOpt('caching', default=True,
deprecated_opts=[cfg.DeprecatedOpt('caching',
group='assignment')],
@@ -448,16 +458,25 @@ FILE_OPTIONS = {
],
'domain_config': [
cfg.StrOpt('driver',
- default='keystone.resource.config_backends.sql.'
- 'DomainConfig',
- help='Domain config backend driver.'),
+ default='sql',
+ help='Entrypoint for the domain config backend driver in '
+ 'the keystone.resource.domain_config namespace.'),
+ cfg.BoolOpt('caching', default=True,
+ help='Toggle for domain config caching. This has no '
+ 'effect unless global caching is enabled.'),
+ cfg.IntOpt('cache_time', default=300,
+ help='TTL (in seconds) to cache domain config data. This '
+ 'has no effect unless domain config caching is '
+ 'enabled.'),
],
'role': [
# The role driver has no default for backward compatibility reasons.
# If role driver is not specified, the assignment driver chooses
# the backend
cfg.StrOpt('driver',
- help='Role backend driver.'),
+ help='Entrypoint for the role backend driver in the '
+ 'keystone.role namespace. Supplied drivers are ldap '
+ 'and sql.'),
cfg.BoolOpt('caching', default=True,
help='Toggle for role caching. This has no effect '
'unless global caching is enabled.'),
@@ -470,14 +489,15 @@ FILE_OPTIONS = {
],
'credential': [
cfg.StrOpt('driver',
- default=('keystone.credential.backends'
- '.sql.Credential'),
- help='Credential backend driver.'),
+ default='sql',
+ help='Entrypoint for the credential backend driver in the '
+ 'keystone.credential namespace.'),
],
'oauth1': [
cfg.StrOpt('driver',
- default='keystone.contrib.oauth1.backends.sql.OAuth1',
- help='Credential backend driver.'),
+ default='sql',
+ help='Entrypoint for the OAuth backend driver in the '
+ 'keystone.oauth1 namespace.'),
cfg.IntOpt('request_token_duration', default=28800,
help='Duration (in seconds) for the OAuth Request Token.'),
cfg.IntOpt('access_token_duration', default=86400,
@@ -485,9 +505,9 @@ FILE_OPTIONS = {
],
'federation': [
cfg.StrOpt('driver',
- default='keystone.contrib.federation.'
- 'backends.sql.Federation',
- help='Federation backend driver.'),
+ default='sql',
+ help='Entrypoint for the federation backend driver in the '
+ 'keystone.federation namespace.'),
cfg.StrOpt('assertion_prefix', default='',
help='Value to be used when filtering assertion parameters '
'from the environment.'),
@@ -502,9 +522,7 @@ FILE_OPTIONS = {
'an admin will not be able to create a domain with '
'this name or update an existing domain to this '
'name. You are not advised to change this value '
- 'unless you really have to. Changing this option '
- 'to empty string or None will not have any impact and '
- 'default name will be used.'),
+ 'unless you really have to.'),
cfg.MultiStrOpt('trusted_dashboard', default=[],
help='A list of trusted dashboard hosts. Before '
'accepting a Single Sign-On request to return a '
@@ -519,26 +537,31 @@ FILE_OPTIONS = {
],
'policy': [
cfg.StrOpt('driver',
- default='keystone.policy.backends.sql.Policy',
- help='Policy backend driver.'),
+ default='sql',
+ help='Entrypoint for the policy backend driver in the '
+ 'keystone.policy namespace. Supplied drivers are '
+ 'rules and sql.'),
cfg.IntOpt('list_limit',
help='Maximum number of entities that will be returned '
'in a policy collection.'),
],
'endpoint_filter': [
cfg.StrOpt('driver',
- default='keystone.contrib.endpoint_filter.backends'
- '.sql.EndpointFilter',
- help='Endpoint Filter backend driver'),
+ default='sql',
+ help='Entrypoint for the endpoint filter backend driver in '
+ 'the keystone.endpoint_filter namespace.'),
cfg.BoolOpt('return_all_endpoints_if_no_filter', default=True,
help='Toggle to return all active endpoints if no filter '
'exists.'),
],
'endpoint_policy': [
+ cfg.BoolOpt('enabled',
+ default=True,
+ help='Enable endpoint_policy functionality.'),
cfg.StrOpt('driver',
- default='keystone.contrib.endpoint_policy.backends'
- '.sql.EndpointPolicy',
- help='Endpoint policy backend driver'),
+ default='sql',
+ help='Entrypoint for the endpoint policy backend driver in '
+ 'the keystone.endpoint_policy namespace.'),
],
'ldap': [
cfg.StrOpt('url', default='ldap://localhost',
@@ -561,18 +584,19 @@ FILE_OPTIONS = {
'Only enable this option if your LDAP server '
'supports subtree deletion.'),
cfg.StrOpt('query_scope', default='one',
- help='The LDAP scope for queries, this can be either '
- '"one" (onelevel/singleLevel) or "sub" '
- '(subtree/wholeSubtree).'),
+ choices=['one', 'sub'],
+ help='The LDAP scope for queries, "one" represents '
+ 'oneLevel/singleLevel and "sub" represents '
+ 'subtree/wholeSubtree options.'),
cfg.IntOpt('page_size', default=0,
help='Maximum results per page; a value of zero ("0") '
'disables paging.'),
cfg.StrOpt('alias_dereferencing', default='default',
- help='The LDAP dereferencing option for queries. This '
- 'can be either "never", "searching", "always", '
- '"finding" or "default". The "default" option falls '
- 'back to using default dereferencing configured by '
- 'your ldap.conf.'),
+ choices=['never', 'searching', 'always', 'finding',
+ 'default'],
+ help='The LDAP dereferencing option for queries. The '
+ '"default" option falls back to using default '
+ 'dereferencing configured by your ldap.conf.'),
cfg.IntOpt('debug_level',
help='Sets the LDAP debugging level for LDAP calls. '
'A value of 0 means that debugging is not enabled. '
@@ -582,7 +606,8 @@ FILE_OPTIONS = {
help='Override the system\'s default referral chasing '
'behavior for queries.'),
cfg.StrOpt('user_tree_dn',
- help='Search base for users.'),
+ help='Search base for users. '
+ 'Defaults to the suffix value.'),
cfg.StrOpt('user_filter',
help='LDAP search filter for users.'),
cfg.StrOpt('user_objectclass', default='inetOrgPerson',
@@ -622,7 +647,7 @@ FILE_OPTIONS = {
'the typical value is "512". This is typically used '
'when "user_enabled_attribute = userAccountControl".'),
cfg.ListOpt('user_attribute_ignore',
- default=['default_project_id', 'tenants'],
+ default=['default_project_id'],
help='List of attributes stripped off the user on '
'update.'),
cfg.StrOpt('user_default_project_id_attribute',
@@ -653,61 +678,76 @@ FILE_OPTIONS = {
cfg.StrOpt('project_tree_dn',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_tree_dn', group='ldap')],
- help='Search base for projects'),
+ deprecated_for_removal=True,
+ help='Search base for projects. '
+ 'Defaults to the suffix value.'),
cfg.StrOpt('project_filter',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_filter', group='ldap')],
+ deprecated_for_removal=True,
help='LDAP search filter for projects.'),
cfg.StrOpt('project_objectclass', default='groupOfNames',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_objectclass', group='ldap')],
+ deprecated_for_removal=True,
help='LDAP objectclass for projects.'),
cfg.StrOpt('project_id_attribute', default='cn',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_id_attribute', group='ldap')],
+ deprecated_for_removal=True,
help='LDAP attribute mapped to project id.'),
cfg.StrOpt('project_member_attribute', default='member',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_member_attribute', group='ldap')],
+ deprecated_for_removal=True,
help='LDAP attribute mapped to project membership for '
'user.'),
cfg.StrOpt('project_name_attribute', default='ou',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_name_attribute', group='ldap')],
+ deprecated_for_removal=True,
help='LDAP attribute mapped to project name.'),
cfg.StrOpt('project_desc_attribute', default='description',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_desc_attribute', group='ldap')],
+ deprecated_for_removal=True,
help='LDAP attribute mapped to project description.'),
cfg.StrOpt('project_enabled_attribute', default='enabled',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_enabled_attribute', group='ldap')],
+ deprecated_for_removal=True,
help='LDAP attribute mapped to project enabled.'),
cfg.StrOpt('project_domain_id_attribute',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_domain_id_attribute', group='ldap')],
+ deprecated_for_removal=True,
default='businessCategory',
help='LDAP attribute mapped to project domain_id.'),
cfg.ListOpt('project_attribute_ignore', default=[],
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_attribute_ignore', group='ldap')],
+ deprecated_for_removal=True,
help='List of attributes stripped off the project on '
'update.'),
cfg.BoolOpt('project_allow_create', default=True,
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_allow_create', group='ldap')],
+ deprecated_for_removal=True,
help='Allow project creation in LDAP backend.'),
cfg.BoolOpt('project_allow_update', default=True,
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_allow_update', group='ldap')],
+ deprecated_for_removal=True,
help='Allow project update in LDAP backend.'),
cfg.BoolOpt('project_allow_delete', default=True,
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_allow_delete', group='ldap')],
+ deprecated_for_removal=True,
help='Allow project deletion in LDAP backend.'),
cfg.BoolOpt('project_enabled_emulation', default=False,
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_enabled_emulation', group='ldap')],
+ deprecated_for_removal=True,
help='If true, Keystone uses an alternative method to '
'determine if a project is enabled or not by '
'checking if they are a member of the '
@@ -715,11 +755,13 @@ FILE_OPTIONS = {
cfg.StrOpt('project_enabled_emulation_dn',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_enabled_emulation_dn', group='ldap')],
+ deprecated_for_removal=True,
help='DN of the group entry to hold enabled projects when '
'using enabled emulation.'),
cfg.ListOpt('project_additional_attribute_mapping',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_additional_attribute_mapping', group='ldap')],
+ deprecated_for_removal=True,
default=[],
help='Additional attribute mappings for projects. '
'Attribute mapping format is '
@@ -728,27 +770,39 @@ FILE_OPTIONS = {
'Identity API attribute.'),
cfg.StrOpt('role_tree_dn',
- help='Search base for roles.'),
+ deprecated_for_removal=True,
+ help='Search base for roles. '
+ 'Defaults to the suffix value.'),
cfg.StrOpt('role_filter',
+ deprecated_for_removal=True,
help='LDAP search filter for roles.'),
cfg.StrOpt('role_objectclass', default='organizationalRole',
+ deprecated_for_removal=True,
help='LDAP objectclass for roles.'),
cfg.StrOpt('role_id_attribute', default='cn',
+ deprecated_for_removal=True,
help='LDAP attribute mapped to role id.'),
cfg.StrOpt('role_name_attribute', default='ou',
+ deprecated_for_removal=True,
help='LDAP attribute mapped to role name.'),
cfg.StrOpt('role_member_attribute', default='roleOccupant',
+ deprecated_for_removal=True,
help='LDAP attribute mapped to role membership.'),
cfg.ListOpt('role_attribute_ignore', default=[],
+ deprecated_for_removal=True,
help='List of attributes stripped off the role on '
'update.'),
cfg.BoolOpt('role_allow_create', default=True,
+ deprecated_for_removal=True,
help='Allow role creation in LDAP backend.'),
cfg.BoolOpt('role_allow_update', default=True,
+ deprecated_for_removal=True,
help='Allow role update in LDAP backend.'),
cfg.BoolOpt('role_allow_delete', default=True,
+ deprecated_for_removal=True,
help='Allow role deletion in LDAP backend.'),
cfg.ListOpt('role_additional_attribute_mapping',
+ deprecated_for_removal=True,
default=[],
help='Additional attribute mappings for roles. Attribute '
'mapping format is <ldap_attr>:<user_attr>, where '
@@ -756,7 +810,8 @@ FILE_OPTIONS = {
'user_attr is the Identity API attribute.'),
cfg.StrOpt('group_tree_dn',
- help='Search base for groups.'),
+ help='Search base for groups. '
+ 'Defaults to the suffix value.'),
cfg.StrOpt('group_filter',
help='LDAP search filter for groups.'),
cfg.StrOpt('group_objectclass', default='groupOfNames',
@@ -794,8 +849,9 @@ FILE_OPTIONS = {
cfg.BoolOpt('use_tls', default=False,
help='Enable TLS for communicating with LDAP servers.'),
cfg.StrOpt('tls_req_cert', default='demand',
- help='Valid options for tls_req_cert are demand, never, '
- 'and allow.'),
+ choices=['demand', 'never', 'allow'],
+ help='Specifies what checks to perform on client '
+ 'certificates in an incoming TLS session.'),
cfg.BoolOpt('use_pool', default=False,
help='Enable LDAP connection pooling.'),
cfg.IntOpt('pool_size', default=10,
@@ -821,20 +877,22 @@ FILE_OPTIONS = {
],
'auth': [
cfg.ListOpt('methods', default=_DEFAULT_AUTH_METHODS,
- help='Default auth methods.'),
+ help='Allowed authentication methods.'),
cfg.StrOpt('password',
- default='keystone.auth.plugins.password.Password',
- help='The password auth plugin module.'),
+ help='Entrypoint for the password auth plugin module in '
+ 'the keystone.auth.password namespace.'),
cfg.StrOpt('token',
- default='keystone.auth.plugins.token.Token',
- help='The token auth plugin module.'),
+ help='Entrypoint for the token auth plugin module in the '
+ 'keystone.auth.token namespace.'),
# deals with REMOTE_USER authentication
cfg.StrOpt('external',
- default='keystone.auth.plugins.external.DefaultDomain',
- help='The external (REMOTE_USER) auth plugin module.'),
+ help='Entrypoint for the external (REMOTE_USER) auth '
+ 'plugin module in the keystone.auth.external '
+ 'namespace. Supplied drivers are DefaultDomain and '
+ 'Domain. The default driver is DefaultDomain.'),
cfg.StrOpt('oauth1',
- default='keystone.auth.plugins.oauth1.OAuth',
- help='The oAuth1.0 auth plugin module.'),
+ help='Entrypoint for the oAuth1.0 auth plugin module in '
+ 'the keystone.auth.oauth1 namespace.'),
],
'paste_deploy': [
cfg.StrOpt('config_file', default='keystone-paste.ini',
@@ -880,8 +938,10 @@ FILE_OPTIONS = {
help='Catalog template file name for use with the '
'template catalog backend.'),
cfg.StrOpt('driver',
- default='keystone.catalog.backends.sql.Catalog',
- help='Catalog backend driver.'),
+ default='sql',
+ help='Entrypoint for the catalog backend driver in the '
+ 'keystone.catalog namespace. Supplied drivers are '
+ 'kvs, sql, templated, and endpoint_filter.sql'),
cfg.BoolOpt('caching', default=True,
help='Toggle for catalog caching. This has no '
'effect unless global caching is enabled.'),
@@ -963,25 +1023,33 @@ FILE_OPTIONS = {
cfg.StrOpt('idp_contact_telephone',
help='Telephone number of contact person.'),
cfg.StrOpt('idp_contact_type', default='other',
- help='Contact type. Allowed values are: '
- 'technical, support, administrative '
- 'billing, and other'),
+ choices=['technical', 'support', 'administrative',
+ 'billing', 'other'],
+ help='The contact type describing the main point of '
+ 'contact for the identity provider.'),
cfg.StrOpt('idp_metadata_path',
default='/etc/keystone/saml2_idp_metadata.xml',
help='Path to the Identity Provider Metadata file. '
'This file should be generated with the '
'keystone-manage saml_idp_metadata command.'),
+ cfg.StrOpt('relay_state_prefix',
+ default='ss:mem:',
+ help='The prefix to use for the RelayState SAML '
+ 'attribute, used when generating ECP wrapped '
+ 'assertions.'),
],
'eventlet_server': [
cfg.IntOpt('public_workers',
deprecated_name='public_workers',
deprecated_group='DEFAULT',
+ deprecated_for_removal=True,
help='The number of worker processes to serve the public '
'eventlet application. Defaults to number of CPUs '
'(minimum of 2).'),
cfg.IntOpt('admin_workers',
deprecated_name='admin_workers',
deprecated_group='DEFAULT',
+ deprecated_for_removal=True,
help='The number of worker processes to serve the admin '
'eventlet application. Defaults to number of CPUs '
'(minimum of 2).'),
@@ -991,10 +1059,13 @@ FILE_OPTIONS = {
group='DEFAULT'),
cfg.DeprecatedOpt('public_bind_host',
group='DEFAULT'), ],
+ deprecated_for_removal=True,
help='The IP address of the network interface for the '
'public service to listen on.'),
- cfg.IntOpt('public_port', default=5000, deprecated_name='public_port',
+ cfg.IntOpt('public_port', default=5000, min=1, max=65535,
+ deprecated_name='public_port',
deprecated_group='DEFAULT',
+ deprecated_for_removal=True,
help='The port number which the public service listens '
'on.'),
cfg.StrOpt('admin_bind_host',
@@ -1003,15 +1074,28 @@ FILE_OPTIONS = {
group='DEFAULT'),
cfg.DeprecatedOpt('admin_bind_host',
group='DEFAULT')],
+ deprecated_for_removal=True,
help='The IP address of the network interface for the '
'admin service to listen on.'),
- cfg.IntOpt('admin_port', default=35357, deprecated_name='admin_port',
+ cfg.IntOpt('admin_port', default=35357, min=1, max=65535,
+ deprecated_name='admin_port',
deprecated_group='DEFAULT',
+ deprecated_for_removal=True,
help='The port number which the admin service listens '
'on.'),
+ cfg.BoolOpt('wsgi_keep_alive', default=True,
+ help="If set to false, disables keepalives on the server; "
+ "all connections will be closed after serving one "
+ "request."),
+ cfg.IntOpt('client_socket_timeout', default=900,
+ help="Timeout for socket operations on a client "
+ "connection. If an incoming connection is idle for "
+ "this number of seconds it will be closed. A value "
+ "of '0' means wait forever."),
cfg.BoolOpt('tcp_keepalive', default=False,
deprecated_name='tcp_keepalive',
deprecated_group='DEFAULT',
+ deprecated_for_removal=True,
help='Set this to true if you want to enable '
'TCP_KEEPALIVE on server sockets, i.e. sockets used '
'by the Keystone wsgi server for client '
@@ -1020,6 +1104,7 @@ FILE_OPTIONS = {
default=600,
deprecated_name='tcp_keepidle',
deprecated_group='DEFAULT',
+ deprecated_for_removal=True,
help='Sets the value of TCP_KEEPIDLE in seconds for each '
'server socket. Only applies if tcp_keepalive is '
'true.'),
@@ -1027,11 +1112,13 @@ FILE_OPTIONS = {
'eventlet_server_ssl': [
cfg.BoolOpt('enable', default=False, deprecated_name='enable',
deprecated_group='ssl',
+ deprecated_for_removal=True,
help='Toggle for SSL support on the Keystone '
'eventlet servers.'),
cfg.StrOpt('certfile',
default="/etc/keystone/ssl/certs/keystone.pem",
deprecated_name='certfile', deprecated_group='ssl',
+ deprecated_for_removal=True,
help='Path of the certfile for SSL. For non-production '
'environments, you may be interested in using '
'`keystone-manage ssl_setup` to generate self-signed '
@@ -1039,13 +1126,16 @@ FILE_OPTIONS = {
cfg.StrOpt('keyfile',
default='/etc/keystone/ssl/private/keystonekey.pem',
deprecated_name='keyfile', deprecated_group='ssl',
+ deprecated_for_removal=True,
help='Path of the keyfile for SSL.'),
cfg.StrOpt('ca_certs',
default='/etc/keystone/ssl/certs/ca.pem',
deprecated_name='ca_certs', deprecated_group='ssl',
+ deprecated_for_removal=True,
help='Path of the CA cert file for SSL.'),
cfg.BoolOpt('cert_required', default=False,
deprecated_name='cert_required', deprecated_group='ssl',
+ deprecated_for_removal=True,
help='Require client certificate.'),
],
}
@@ -1080,7 +1170,7 @@ def configure(conf=None):
cfg.StrOpt('pydev-debug-host',
help='Host to connect to for remote debugger.'))
conf.register_cli_opt(
- cfg.IntOpt('pydev-debug-port',
+ cfg.IntOpt('pydev-debug-port', min=1, max=65535,
help='Port to connect to for remote debugger.'))
for section in FILE_OPTIONS:
@@ -1115,4 +1205,4 @@ def list_opts():
:returns: a list of (group_name, opts) tuples
"""
- return FILE_OPTIONS.items()
+ return list(FILE_OPTIONS.items())
diff --git a/keystone-moon/keystone/common/controller.py b/keystone-moon/keystone/common/controller.py
index bd26b7c4..bc7074ac 100644
--- a/keystone-moon/keystone/common/controller.py
+++ b/keystone-moon/keystone/common/controller.py
@@ -17,6 +17,7 @@ import uuid
from oslo_config import cfg
from oslo_log import log
+from oslo_utils import strutils
import six
from keystone.common import authorization
@@ -39,7 +40,7 @@ def v2_deprecated(f):
This is a placeholder for the pending deprecation of v2. The implementation
of this decorator can be replaced with::
- from keystone.openstack.common import versionutils
+ from oslo_log import versionutils
v2_deprecated = versionutils.deprecated(
@@ -52,9 +53,12 @@ def v2_deprecated(f):
def _build_policy_check_credentials(self, action, context, kwargs):
+ kwargs_str = ', '.join(['%s=%s' % (k, kwargs[k]) for k in kwargs])
+ kwargs_str = strutils.mask_password(kwargs_str)
+
LOG.debug('RBAC: Authorizing %(action)s(%(kwargs)s)', {
'action': action,
- 'kwargs': ', '.join(['%s=%s' % (k, kwargs[k]) for k in kwargs])})
+ 'kwargs': kwargs_str})
# see if auth context has already been created. If so use it.
if ('environment' in context and
@@ -219,7 +223,11 @@ class V2Controller(wsgi.Application):
@staticmethod
def filter_domain_id(ref):
"""Remove domain_id since v2 calls are not domain-aware."""
- ref.pop('domain_id', None)
+ if 'domain_id' in ref:
+ if ref['domain_id'] != CONF.identity.default_domain_id:
+ raise exception.Unauthorized(
+ _('Non-default domain is not supported'))
+ del ref['domain_id']
return ref
@staticmethod
@@ -239,6 +247,18 @@ class V2Controller(wsgi.Application):
return ref
@staticmethod
+ def filter_project_parent_id(ref):
+ """Remove parent_id since v2 calls are not hierarchy-aware."""
+ ref.pop('parent_id', None)
+ return ref
+
+ @staticmethod
+ def filter_is_domain(ref):
+ """Remove is_domain field since v2 calls are not domain-aware."""
+ ref.pop('is_domain', None)
+ return ref
+
+ @staticmethod
def normalize_username_in_response(ref):
"""Adds username to outgoing user refs to match the v2 spec.
@@ -266,9 +286,12 @@ class V2Controller(wsgi.Application):
def v3_to_v2_user(ref):
"""Convert a user_ref from v3 to v2 compatible.
- * v2.0 users are not domain aware, and should have domain_id removed
- * v2.0 users expect the use of tenantId instead of default_project_id
- * v2.0 users have a username attribute
+ - v2.0 users are not domain aware, and should have domain_id validated
+ to be the default domain, and then removed.
+
+ - v2.0 users expect the use of tenantId instead of default_project_id.
+
+ - v2.0 users have a username attribute.
This method should only be applied to user_refs being returned from the
v2.0 controller(s).
@@ -304,6 +327,35 @@ class V2Controller(wsgi.Application):
else:
raise ValueError(_('Expected dict or list: %s') % type(ref))
+ @staticmethod
+ def v3_to_v2_project(ref):
+ """Convert a project_ref from v3 to v2.
+
+ * v2.0 projects are not domain aware, and should have domain_id removed
+ * v2.0 projects are not hierarchy aware, and should have parent_id
+ removed
+
+ This method should only be applied to project_refs being returned from
+ the v2.0 controller(s).
+
+ If ref is a list type, we will iterate through each element and do the
+ conversion.
+ """
+
+ def _filter_project_properties(ref):
+ """Run through the various filter methods."""
+ V2Controller.filter_domain_id(ref)
+ V2Controller.filter_project_parent_id(ref)
+ V2Controller.filter_is_domain(ref)
+ return ref
+
+ if isinstance(ref, dict):
+ return _filter_project_properties(ref)
+ elif isinstance(ref, list):
+ return [_filter_project_properties(x) for x in ref]
+ else:
+ raise ValueError(_('Expected dict or list: %s') % type(ref))
+
def format_project_list(self, tenant_refs, **kwargs):
"""Format a v2 style project list, including marker/limits."""
marker = kwargs.get('marker')
@@ -656,19 +708,7 @@ class V3Controller(wsgi.Application):
if context['query_string'].get('domain_id') is not None:
return context['query_string'].get('domain_id')
- try:
- token_ref = token_model.KeystoneToken(
- token_id=context['token_id'],
- token_data=self.token_provider_api.validate_token(
- context['token_id']))
- except KeyError:
- raise exception.ValidationError(
- _('domain_id is required as part of entity'))
- except (exception.TokenNotFound,
- exception.UnsupportedTokenVersionException):
- LOG.warning(_LW('Invalid token found while getting domain ID '
- 'for list request'))
- raise exception.Unauthorized()
+ token_ref = utils.get_token_ref(context)
if token_ref.domain_scoped:
return token_ref.domain_id
@@ -685,25 +725,7 @@ class V3Controller(wsgi.Application):
being used.
"""
- # We could make this more efficient by loading the domain_id
- # into the context in the wrapper function above (since
- # this version of normalize_domain will only be called inside
- # a v3 protected call). However, this optimization is probably not
- # worth the duplication of state
- try:
- token_ref = token_model.KeystoneToken(
- token_id=context['token_id'],
- token_data=self.token_provider_api.validate_token(
- context['token_id']))
- except KeyError:
- # This might happen if we use the Admin token, for instance
- raise exception.ValidationError(
- _('A domain-scoped token must be used'))
- except (exception.TokenNotFound,
- exception.UnsupportedTokenVersionException):
- LOG.warning(_LW('Invalid token found while getting domain ID '
- 'for list request'))
- raise exception.Unauthorized()
+ token_ref = utils.get_token_ref(context)
if token_ref.domain_scoped:
return token_ref.domain_id
diff --git a/keystone-moon/keystone/common/dependency.py b/keystone-moon/keystone/common/dependency.py
index 14a68f19..e19f705f 100644
--- a/keystone-moon/keystone/common/dependency.py
+++ b/keystone-moon/keystone/common/dependency.py
@@ -15,9 +15,9 @@
"""This module provides support for dependency injection.
Providers are registered via the ``@provider()`` decorator, and dependencies on
-them are registered with ``@requires()`` or ``@optional()``. Providers are
-available to their consumers via an attribute. See the documentation for the
-individual functions for more detail.
+them are registered with ``@requires()``. Providers are available to their
+consumers via an attribute. See the documentation for the individual functions
+for more detail.
See also:
@@ -27,16 +27,12 @@ See also:
import traceback
-import six
-
from keystone.i18n import _
-from keystone import notifications
_REGISTRY = {}
_future_dependencies = {}
-_future_optionals = {}
_factories = {}
@@ -94,44 +90,10 @@ def provider(name):
"""
def wrapper(cls):
def wrapped(init):
- def register_event_callbacks(self):
- # NOTE(morganfainberg): A provider who has an implicit
- # dependency on other providers may utilize the event callback
- # mechanism to react to any changes in those providers. This is
- # performed at the .provider() mechanism so that we can ensure
- # that the callback is only ever called once and guaranteed
- # to be on the properly configured and instantiated backend.
- if not hasattr(self, 'event_callbacks'):
- return
-
- if not isinstance(self.event_callbacks, dict):
- msg = _('event_callbacks must be a dict')
- raise ValueError(msg)
-
- for event in self.event_callbacks:
- if not isinstance(self.event_callbacks[event], dict):
- msg = _('event_callbacks[%s] must be a dict') % event
- raise ValueError(msg)
- for resource_type in self.event_callbacks[event]:
- # Make sure we register the provider for each event it
- # cares to call back.
- callbacks = self.event_callbacks[event][resource_type]
- if not callbacks:
- continue
- if not hasattr(callbacks, '__iter__'):
- # ensure the callback information is a list
- # allowing multiple callbacks to exist
- callbacks = [callbacks]
- notifications.register_event_callback(event,
- resource_type,
- callbacks)
-
def __wrapped_init__(self, *args, **kwargs):
"""Initialize the wrapped object and add it to the registry."""
init(self, *args, **kwargs)
_set_provider(name, self)
- register_event_callbacks(self)
-
resolve_future_dependencies(__provider_name=name)
return __wrapped_init__
@@ -157,7 +119,6 @@ def _process_dependencies(obj):
setattr(obj, dependency, get_provider(dependency))
process(obj, '_dependencies', _future_dependencies)
- process(obj, '_optionals', _future_optionals)
def requires(*dependencies):
@@ -210,34 +171,6 @@ def requires(*dependencies):
return wrapped
-def optional(*dependencies):
- """Similar to ``@requires()``, except that the dependencies are optional.
-
- If no provider is available, the attributes will be set to ``None``.
-
- """
- def wrapper(self, *args, **kwargs):
- """Inject each dependency from the registry."""
- self.__wrapped_init__(*args, **kwargs)
- _process_dependencies(self)
-
- def wrapped(cls):
- """Note the optional dependencies on the object for later injection.
-
- The dependencies of the parent class are combined with that of the
- child class to create a new set of dependencies.
-
- """
- existing_optionals = getattr(cls, '_optionals', set())
- cls._optionals = existing_optionals.union(dependencies)
- if not hasattr(cls, '__wrapped_init__'):
- cls.__wrapped_init__ = cls.__init__
- cls.__init__ = wrapper
- return cls
-
- return wrapped
-
-
def resolve_future_dependencies(__provider_name=None):
"""Forces injection of all dependencies.
@@ -259,29 +192,16 @@ def resolve_future_dependencies(__provider_name=None):
# A provider was registered, so take care of any objects depending on
# it.
targets = _future_dependencies.pop(__provider_name, [])
- targets.extend(_future_optionals.pop(__provider_name, []))
for target in targets:
setattr(target, __provider_name, get_provider(__provider_name))
return
- # Resolve optional dependencies, sets the attribute to None if there's no
- # provider registered.
- for dependency, targets in six.iteritems(_future_optionals.copy()):
- provider = get_provider(dependency, optional=GET_OPTIONAL)
- if provider is None:
- factory = _factories.get(dependency)
- if factory:
- provider = factory()
- new_providers[dependency] = provider
- for target in targets:
- setattr(target, dependency, provider)
-
# Resolve future dependencies, raises UnresolvableDependencyException if
# there's no provider registered.
try:
- for dependency, targets in six.iteritems(_future_dependencies.copy()):
+ for dependency, targets in _future_dependencies.copy().items():
if dependency not in _REGISTRY:
# a Class was registered that could fulfill the dependency, but
# it has not yet been initialized.
@@ -308,4 +228,3 @@ def reset():
_REGISTRY.clear()
_future_dependencies.clear()
- _future_optionals.clear()
diff --git a/keystone-moon/keystone/common/driver_hints.py b/keystone-moon/keystone/common/driver_hints.py
index 0361e314..ff0a774c 100644
--- a/keystone-moon/keystone/common/driver_hints.py
+++ b/keystone-moon/keystone/common/driver_hints.py
@@ -30,6 +30,10 @@ class Hints(object):
accessed publicly. Also it contains a dict called limit, which will
indicate the amount of data we want to limit our listing to.
+ If the filter is discovered to never match, then `cannot_match` can be set
+ to indicate that there will not be any matches and the backend work can be
+ short-circuited.
+
Each filter term consists of:
* ``name``: the name of the attribute being matched
@@ -44,6 +48,7 @@ class Hints(object):
def __init__(self):
self.limit = None
self.filters = list()
+ self.cannot_match = False
def add_filter(self, name, value, comparator='equals',
case_sensitive=False):
diff --git a/keystone-moon/keystone/common/environment/__init__.py b/keystone-moon/keystone/common/environment/__init__.py
index da1de890..3edf6b0b 100644
--- a/keystone-moon/keystone/common/environment/__init__.py
+++ b/keystone-moon/keystone/common/environment/__init__.py
@@ -17,6 +17,7 @@ import os
from oslo_log import log
+
LOG = log.getLogger(__name__)
@@ -93,7 +94,7 @@ def use_eventlet(monkeypatch_thread=None):
def use_stdlib():
global httplib, subprocess
- import httplib as _httplib
+ import six.moves.http_client as _httplib
import subprocess as _subprocess
httplib = _httplib
diff --git a/keystone-moon/keystone/common/environment/eventlet_server.py b/keystone-moon/keystone/common/environment/eventlet_server.py
index 639e074a..398952e1 100644
--- a/keystone-moon/keystone/common/environment/eventlet_server.py
+++ b/keystone-moon/keystone/common/environment/eventlet_server.py
@@ -25,12 +25,17 @@ import sys
import eventlet
import eventlet.wsgi
import greenlet
+from oslo_config import cfg
from oslo_log import log
from oslo_log import loggers
+from oslo_service import service
from keystone.i18n import _LE, _LI
+CONF = cfg.CONF
+
+
LOG = log.getLogger(__name__)
# The size of a pool that is used to spawn a single green thread in which
@@ -62,7 +67,7 @@ class EventletFilteringLogger(loggers.WritableLogger):
self.logger.log(self.level, msg.rstrip())
-class Server(object):
+class Server(service.ServiceBase):
"""Server class to manage multiple WSGI sockets and applications."""
def __init__(self, application, host=None, port=None, keepalive=False,
@@ -173,7 +178,7 @@ class Server(object):
The service interface is used by the launcher when receiving a
SIGHUP. The service interface is defined in
- keystone.openstack.common.service.Service.
+ oslo_service.service.Service.
Keystone does not need to do anything here.
"""
@@ -182,10 +187,17 @@ class Server(object):
def _run(self, application, socket):
"""Start a WSGI server with a new green thread pool."""
logger = log.getLogger('eventlet.wsgi.server')
+
+ # NOTE(dolph): [eventlet_server] client_socket_timeout is required to
+ # be an integer in keystone.conf, but in order to make
+ # eventlet.wsgi.server() wait forever, we pass None instead of 0.
+ socket_timeout = CONF.eventlet_server.client_socket_timeout or None
+
try:
- eventlet.wsgi.server(socket, application,
- log=EventletFilteringLogger(logger),
- debug=False)
+ eventlet.wsgi.server(
+ socket, application, log=EventletFilteringLogger(logger),
+ debug=False, keepalive=CONF.eventlet_server.wsgi_keep_alive,
+ socket_timeout=socket_timeout)
except greenlet.GreenletExit:
# Wait until all servers have completed running
pass
diff --git a/keystone-moon/keystone/common/json_home.py b/keystone-moon/keystone/common/json_home.py
index 215d596a..c048a356 100644
--- a/keystone-moon/keystone/common/json_home.py
+++ b/keystone-moon/keystone/common/json_home.py
@@ -13,7 +13,8 @@
# under the License.
-import six
+from keystone import exception
+from keystone.i18n import _
def build_v3_resource_relation(resource_name):
@@ -62,14 +63,24 @@ class Status(object):
STABLE = 'stable'
@classmethod
- def is_supported(cls, status):
- return status in [cls.DEPRECATED, cls.EXPERIMENTAL, cls.STABLE]
+ def update_resource_data(cls, resource_data, status):
+ if status is cls.STABLE:
+ # We currently do not add a status if the resource is stable, the
+ # absence of the status property can be taken as meaning that the
+ # resource is stable.
+ return
+ if status is cls.DEPRECATED or status is cls.EXPERIMENTAL:
+ resource_data['hints'] = {'status': status}
+ return
+
+ raise exception.Error(message=_(
+ 'Unexpected status requested for JSON Home response, %s') % status)
def translate_urls(json_home, new_prefix):
"""Given a JSON Home document, sticks new_prefix on each of the urls."""
- for dummy_rel, resource in six.iteritems(json_home['resources']):
+ for dummy_rel, resource in json_home['resources'].items():
if 'href' in resource:
resource['href'] = new_prefix + resource['href']
elif 'href-template' in resource:
diff --git a/keystone-moon/keystone/common/kvs/backends/memcached.py b/keystone-moon/keystone/common/kvs/backends/memcached.py
index db453143..f54c1a01 100644
--- a/keystone-moon/keystone/common/kvs/backends/memcached.py
+++ b/keystone-moon/keystone/common/kvs/backends/memcached.py
@@ -23,9 +23,9 @@ from dogpile.cache import api
from dogpile.cache.backends import memcached
from oslo_config import cfg
from oslo_log import log
+from six.moves import range
from keystone.common.cache.backends import memcache_pool
-from keystone.common import manager
from keystone import exception
from keystone.i18n import _
@@ -73,12 +73,13 @@ class MemcachedLock(object):
client.delete(self.key)
-class MemcachedBackend(manager.Manager):
+class MemcachedBackend(object):
"""Pivot point to leverage the various dogpile.cache memcached backends.
- To specify a specific dogpile.cache memcached driver, pass the argument
- `memcached_driver` set to one of the provided memcached drivers (at this
- time `memcached`, `bmemcached`, `pylibmc` are valid).
+ To specify a specific dogpile.cache memcached backend, pass the argument
+ `memcached_backend` set to one of the provided memcached backends (at this
+ time `memcached`, `bmemcached`, `pylibmc` and `pooled_memcached` are
+ valid).
"""
def __init__(self, arguments):
self._key_mangler = None
@@ -105,13 +106,19 @@ class MemcachedBackend(manager.Manager):
else:
if backend not in VALID_DOGPILE_BACKENDS:
raise ValueError(
- _('Backend `%(driver)s` is not a valid memcached '
- 'backend. Valid drivers: %(driver_list)s') %
- {'driver': backend,
- 'driver_list': ','.join(VALID_DOGPILE_BACKENDS.keys())})
+ _('Backend `%(backend)s` is not a valid memcached '
+ 'backend. Valid backends: %(backend_list)s') %
+ {'backend': backend,
+ 'backend_list': ','.join(VALID_DOGPILE_BACKENDS.keys())})
else:
self.driver = VALID_DOGPILE_BACKENDS[backend](arguments)
+ def __getattr__(self, name):
+ """Forward calls to the underlying driver."""
+ f = getattr(self.driver, name)
+ setattr(self, name, f)
+ return f
+
def _get_set_arguments_driver_attr(self, exclude_expiry=False):
# NOTE(morganfainberg): Shallow copy the .set_arguments dict to
diff --git a/keystone-moon/keystone/common/kvs/core.py b/keystone-moon/keystone/common/kvs/core.py
index cbbb7462..6ce7b318 100644
--- a/keystone-moon/keystone/common/kvs/core.py
+++ b/keystone-moon/keystone/common/kvs/core.py
@@ -25,7 +25,6 @@ from dogpile.core import nameregistry
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
-import six
from keystone import exception
from keystone.i18n import _
@@ -147,24 +146,24 @@ class KeyValueStore(object):
self._region.name)
def _set_keymangler_on_backend(self, key_mangler):
- try:
- self._region.backend.key_mangler = key_mangler
- except Exception as e:
- # NOTE(morganfainberg): The setting of the key_mangler on the
- # backend is used to allow the backend to
- # calculate a hashed key value as needed. Not all backends
- # require the ability to calculate hashed keys. If the
- # backend does not support/require this feature log a
- # debug line and move on otherwise raise the proper exception.
- # Support of the feature is implied by the existence of the
- # 'raw_no_expiry_keys' attribute.
- if not hasattr(self._region.backend, 'raw_no_expiry_keys'):
- LOG.debug(('Non-expiring keys not supported/required by '
- '%(region)s backend; unable to set '
- 'key_mangler for backend: %(err)s'),
- {'region': self._region.name, 'err': e})
- else:
- raise
+ try:
+ self._region.backend.key_mangler = key_mangler
+ except Exception as e:
+ # NOTE(morganfainberg): The setting of the key_mangler on the
+ # backend is used to allow the backend to
+ # calculate a hashed key value as needed. Not all backends
+ # require the ability to calculate hashed keys. If the
+ # backend does not support/require this feature log a
+ # debug line and move on otherwise raise the proper exception.
+ # Support of the feature is implied by the existence of the
+ # 'raw_no_expiry_keys' attribute.
+ if not hasattr(self._region.backend, 'raw_no_expiry_keys'):
+ LOG.debug(('Non-expiring keys not supported/required by '
+ '%(region)s backend; unable to set '
+ 'key_mangler for backend: %(err)s'),
+ {'region': self._region.name, 'err': e})
+ else:
+ raise
def _set_key_mangler(self, key_mangler):
# Set the key_mangler that is appropriate for the given region being
@@ -232,7 +231,7 @@ class KeyValueStore(object):
if config_args['lock_timeout'] > 0:
config_args['lock_timeout'] += LOCK_WINDOW
- for argument, value in six.iteritems(config_args):
+ for argument, value in config_args.items():
arg_key = '.'.join([prefix, 'arguments', argument])
conf_dict[arg_key] = value
diff --git a/keystone-moon/keystone/common/kvs/legacy.py b/keystone-moon/keystone/common/kvs/legacy.py
index ba036016..7e27d97f 100644
--- a/keystone-moon/keystone/common/kvs/legacy.py
+++ b/keystone-moon/keystone/common/kvs/legacy.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_log import versionutils
+
from keystone import exception
-from keystone.openstack.common import versionutils
class DictKvs(dict):
diff --git a/keystone-moon/keystone/common/ldap/core.py b/keystone-moon/keystone/common/ldap/core.py
index 144c0cfd..0bb3830c 100644
--- a/keystone-moon/keystone/common/ldap/core.py
+++ b/keystone-moon/keystone/common/ldap/core.py
@@ -24,11 +24,13 @@ import ldap.filter
import ldappool
from oslo_log import log
import six
+from six.moves import map, zip
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LW
+
LOG = log.getLogger(__name__)
LDAP_VALUES = {'TRUE': True, 'FALSE': False}
@@ -159,7 +161,7 @@ def convert_ldap_result(ldap_result):
at_least_one_referral = True
continue
- for kind, values in six.iteritems(attrs):
+ for kind, values in attrs.items():
try:
val2py = enabled2py if kind == 'enabled' else ldap2py
ldap_attrs[kind] = [val2py(x) for x in values]
@@ -327,7 +329,7 @@ def dn_startswith(descendant_dn, dn):
@six.add_metaclass(abc.ABCMeta)
class LDAPHandler(object):
- '''Abstract class which defines methods for a LDAP API provider.
+ """Abstract class which defines methods for a LDAP API provider.
Native Keystone values cannot be passed directly into and from the
python-ldap API. Type conversion must occur at the LDAP API
@@ -415,7 +417,8 @@ class LDAPHandler(object):
method to any derivations of the abstract class the code will fail
to load and run making it impossible to forget updating all the
derived classes.
- '''
+
+ """
@abc.abstractmethod
def __init__(self, conn=None):
self.conn = conn
@@ -481,13 +484,13 @@ class LDAPHandler(object):
class PythonLDAPHandler(LDAPHandler):
- '''Implementation of the LDAPHandler interface which calls the
- python-ldap API.
+ """LDAPHandler implementation which calls the python-ldap API.
- Note, the python-ldap API requires all string values to be UTF-8
- encoded. The KeystoneLDAPHandler enforces this prior to invoking
- the methods in this class.
- '''
+ Note, the python-ldap API requires all string values to be UTF-8 encoded.
+ The KeystoneLDAPHandler enforces this prior to invoking the methods in this
+ class.
+
+ """
def __init__(self, conn=None):
super(PythonLDAPHandler, self).__init__(conn=conn)
@@ -569,10 +572,7 @@ class PythonLDAPHandler(LDAPHandler):
def _common_ldap_initialization(url, use_tls=False, tls_cacertfile=None,
tls_cacertdir=None, tls_req_cert=None,
debug_level=None):
- '''Method for common ldap initialization between PythonLDAPHandler and
- PooledLDAPHandler.
- '''
-
+ """LDAP initialization for PythonLDAPHandler and PooledLDAPHandler."""
LOG.debug("LDAP init: url=%s", url)
LOG.debug('LDAP init: use_tls=%s tls_cacertfile=%s tls_cacertdir=%s '
'tls_req_cert=%s tls_avail=%s',
@@ -616,7 +616,7 @@ def _common_ldap_initialization(url, use_tls=False, tls_cacertfile=None,
"or is not a directory") %
tls_cacertdir)
ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
- if tls_req_cert in LDAP_TLS_CERTS.values():
+ if tls_req_cert in list(LDAP_TLS_CERTS.values()):
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
else:
LOG.debug("LDAP TLS: invalid TLS_REQUIRE_CERT Option=%s",
@@ -624,15 +624,16 @@ def _common_ldap_initialization(url, use_tls=False, tls_cacertfile=None,
class MsgId(list):
- '''Wrapper class to hold connection and msgid.'''
+ """Wrapper class to hold connection and msgid."""
pass
def use_conn_pool(func):
- '''Use this only for connection pool specific ldap API.
+ """Use this only for connection pool specific ldap API.
This adds connection object to decorated API as next argument after self.
- '''
+
+ """
def wrapper(self, *args, **kwargs):
# assert isinstance(self, PooledLDAPHandler)
with self._get_pool_connection() as conn:
@@ -642,8 +643,7 @@ def use_conn_pool(func):
class PooledLDAPHandler(LDAPHandler):
- '''Implementation of the LDAPHandler interface which uses pooled
- connection manager.
+ """LDAPHandler implementation which uses pooled connection manager.
Pool specific configuration is defined in [ldap] section.
All other LDAP configuration is still used from [ldap] section
@@ -663,8 +663,8 @@ class PooledLDAPHandler(LDAPHandler):
Note, the python-ldap API requires all string values to be UTF-8
encoded. The KeystoneLDAPHandler enforces this prior to invoking
the methods in this class.
- '''
+ """
# Added here to allow override for testing
Connector = ldappool.StateConnector
auth_pool_prefix = 'auth_pool_'
@@ -737,7 +737,7 @@ class PooledLDAPHandler(LDAPHandler):
# if connection has a lifetime, then it already has options specified
if conn.get_lifetime() > 30:
return
- for option, invalue in six.iteritems(self.conn_options):
+ for option, invalue in self.conn_options.items():
conn.set_option(option, invalue)
def _get_pool_connection(self):
@@ -745,9 +745,8 @@ class PooledLDAPHandler(LDAPHandler):
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
- '''Not using use_conn_pool decorator here as this API takes cred as
- input.
- '''
+ # Not using use_conn_pool decorator here as this API takes cred as
+ # input.
self.who = who
self.cred = cred
with self._get_pool_connection() as conn:
@@ -773,16 +772,17 @@ class PooledLDAPHandler(LDAPHandler):
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
- '''This API is asynchoronus API which returns MsgId instance to be used
- in result3 call.
+ """Asynchronous API to return a ``MsgId`` instance.
+
+ The ``MsgId`` instance can be safely used in a call to ``result3()``.
- To work with result3 API in predicatable manner, same LDAP connection
- is needed which provided msgid. So wrapping used connection and msgid
- in MsgId class. The connection associated with search_ext is released
- once last hard reference to MsgId object is freed. This will happen
- when the method is done with returned MsgId usage.
- '''
+ To work with ``result3()`` API in predictable manner, the same LDAP
+ connection is needed which originally provided the ``msgid``. So, this
+ method wraps the existing connection and ``msgid`` in a new ``MsgId``
+ instance. The connection associated with ``search_ext`` is released
+ once last hard reference to the ``MsgId`` instance is freed.
+ """
conn_ctxt = self._get_pool_connection()
conn = conn_ctxt.__enter__()
try:
@@ -800,11 +800,12 @@ class PooledLDAPHandler(LDAPHandler):
def result3(self, msgid, all=1, timeout=None,
resp_ctrl_classes=None):
- '''This method is used to wait for and return the result of an
- operation previously initiated by one of the LDAP asynchronous
- operation routines (eg search_ext()) It returned an invocation
- identifier (a message id) upon successful initiation of their
- operation.
+ """This method is used to wait for and return result.
+
+ This method returns the result of an operation previously initiated by
+ one of the LDAP asynchronous operation routines (eg search_ext()). It
+ returned an invocation identifier (a message id) upon successful
+ initiation of their operation.
Input msgid is expected to be instance of class MsgId which has LDAP
session/connection used to execute search_ext and message idenfier.
@@ -812,7 +813,8 @@ class PooledLDAPHandler(LDAPHandler):
The connection associated with search_ext is released once last hard
reference to MsgId object is freed. This will happen when function
which requested msgId and used it in result3 exits.
- '''
+
+ """
conn, msg_id = msgid
return conn.result3(msg_id, all, timeout)
@@ -831,7 +833,7 @@ class PooledLDAPHandler(LDAPHandler):
class KeystoneLDAPHandler(LDAPHandler):
- '''Convert data types and perform logging.
+ """Convert data types and perform logging.
This LDAP inteface wraps the python-ldap based interfaces. The
python-ldap interfaces require string values encoded in UTF-8. The
@@ -854,7 +856,8 @@ class KeystoneLDAPHandler(LDAPHandler):
Data returned from the LDAP call is converted back from UTF-8
encoded strings into the Python data type used internally in
OpenStack.
- '''
+
+ """
def __init__(self, conn=None):
super(KeystoneLDAPHandler, self).__init__(conn=conn)
@@ -938,7 +941,7 @@ class KeystoneLDAPHandler(LDAPHandler):
if attrlist is None:
attrlist_utf8 = None
else:
- attrlist_utf8 = map(utf8_encode, attrlist)
+ attrlist_utf8 = list(map(utf8_encode, attrlist))
ldap_result = self.conn.search_s(base_utf8, scope,
filterstr_utf8,
attrlist_utf8, attrsonly)
@@ -989,7 +992,7 @@ class KeystoneLDAPHandler(LDAPHandler):
attrlist_utf8 = None
else:
attrlist = [attr for attr in attrlist if attr is not None]
- attrlist_utf8 = map(utf8_encode, attrlist)
+ attrlist_utf8 = list(map(utf8_encode, attrlist))
msgid = self.conn.search_ext(base_utf8,
scope,
filterstr_utf8,
@@ -1083,7 +1086,7 @@ def register_handler(prefix, handler):
def _get_connection(conn_url, use_pool=False, use_auth_pool=False):
- for prefix, handler in six.iteritems(_HANDLERS):
+ for prefix, handler in _HANDLERS.items():
if conn_url.startswith(prefix):
return handler()
@@ -1109,7 +1112,6 @@ def filter_entity(entity_ref):
class BaseLdap(object):
- DEFAULT_SUFFIX = "dc=example,dc=com"
DEFAULT_OU = None
DEFAULT_STRUCTURAL_CLASSES = None
DEFAULT_ID_ATTR = 'cn'
@@ -1156,8 +1158,6 @@ class BaseLdap(object):
if self.options_name is not None:
self.suffix = conf.ldap.suffix
- if self.suffix is None:
- self.suffix = self.DEFAULT_SUFFIX
dn = '%s_tree_dn' % self.options_name
self.tree_dn = (getattr(conf.ldap, dn)
or '%s,%s' % (self.DEFAULT_OU, self.suffix))
@@ -1169,7 +1169,7 @@ class BaseLdap(object):
self.object_class = (getattr(conf.ldap, objclass)
or self.DEFAULT_OBJECTCLASS)
- for k, v in six.iteritems(self.attribute_options_names):
+ for k, v in self.attribute_options_names.items():
v = '%s_%s_attribute' % (self.options_name, v)
self.attribute_mapping[k] = getattr(conf.ldap, v)
@@ -1318,7 +1318,7 @@ class BaseLdap(object):
# in a case-insensitive way. We use the case specified in the
# mapping for the model to ensure we have a predictable way of
# retrieving values later.
- lower_res = {k.lower(): v for k, v in six.iteritems(res[1])}
+ lower_res = {k.lower(): v for k, v in res[1].items()}
id_attrs = lower_res.get(self.id_attr.lower())
if not id_attrs:
@@ -1404,7 +1404,7 @@ class BaseLdap(object):
self.affirm_unique(values)
object_classes = self.structural_classes + [self.object_class]
attrs = [('objectClass', object_classes)]
- for k, v in six.iteritems(values):
+ for k, v in values.items():
if k in self.attribute_ignore:
continue
if k == 'id':
@@ -1416,7 +1416,7 @@ class BaseLdap(object):
if attr_type is not None:
attrs.append((attr_type, [v]))
extra_attrs = [attr for attr, name
- in six.iteritems(self.extra_attr_mapping)
+ in self.extra_attr_mapping.items()
if name == k]
for attr in extra_attrs:
attrs.append((attr, [v]))
@@ -1439,8 +1439,8 @@ class BaseLdap(object):
with self.get_connection() as conn:
try:
attrs = list(set(([self.id_attr] +
- self.attribute_mapping.values() +
- self.extra_attr_mapping.keys())))
+ list(self.attribute_mapping.values()) +
+ list(self.extra_attr_mapping.keys()))))
res = conn.search_s(self.tree_dn,
self.LDAP_SCOPE,
query,
@@ -1453,14 +1453,15 @@ class BaseLdap(object):
return None
def _ldap_get_all(self, ldap_filter=None):
- query = u'(&%s(objectClass=%s))' % (ldap_filter or
- self.ldap_filter or
- '', self.object_class)
+ query = u'(&%s(objectClass=%s)(%s=*))' % (
+ ldap_filter or self.ldap_filter or '',
+ self.object_class,
+ self.id_attr)
with self.get_connection() as conn:
try:
attrs = list(set(([self.id_attr] +
- self.attribute_mapping.values() +
- self.extra_attr_mapping.keys())))
+ list(self.attribute_mapping.values()) +
+ list(self.extra_attr_mapping.keys()))))
return conn.search_s(self.tree_dn,
self.LDAP_SCOPE,
query,
@@ -1479,7 +1480,7 @@ class BaseLdap(object):
query = (u'(&%s%s)' %
(query, ''.join([calc_filter(k, v) for k, v in
- six.iteritems(query_params)])))
+ query_params.items()])))
with self.get_connection() as conn:
return conn.search_s(search_base, scope, query, attrlist)
@@ -1509,7 +1510,7 @@ class BaseLdap(object):
old_obj = self.get(object_id)
modlist = []
- for k, v in six.iteritems(values):
+ for k, v in values.items():
if k == 'id':
# id can't be modified.
continue
@@ -1648,7 +1649,7 @@ class BaseLdap(object):
(query, ''.join(['(%s=%s)'
% (k, ldap.filter.escape_filter_chars(v))
for k, v in
- six.iteritems(query_params)])))
+ query_params.items()])))
not_deleted_nodes = []
with self.get_connection() as conn:
try:
@@ -1738,6 +1739,11 @@ class BaseLdap(object):
return query_term
+ if query is None:
+ # make sure query is a string so the ldap filter is properly
+ # constructed from filter_list later
+ query = ''
+
if hints is None:
return query
@@ -1799,25 +1805,24 @@ class EnabledEmuMixIn(BaseLdap):
utf8_decode(naming_rdn[1]))
self.enabled_emulation_naming_attr = naming_attr
- def _get_enabled(self, object_id):
+ def _get_enabled(self, object_id, conn):
dn = self._id_to_dn(object_id)
query = '(member=%s)' % dn
- with self.get_connection() as conn:
- try:
- enabled_value = conn.search_s(self.enabled_emulation_dn,
- ldap.SCOPE_BASE,
- query, ['cn'])
- except ldap.NO_SUCH_OBJECT:
- return False
- else:
- return bool(enabled_value)
+ try:
+ enabled_value = conn.search_s(self.enabled_emulation_dn,
+ ldap.SCOPE_BASE,
+ query, attrlist=DN_ONLY)
+ except ldap.NO_SUCH_OBJECT:
+ return False
+ else:
+ return bool(enabled_value)
def _add_enabled(self, object_id):
- if not self._get_enabled(object_id):
- modlist = [(ldap.MOD_ADD,
- 'member',
- [self._id_to_dn(object_id)])]
- with self.get_connection() as conn:
+ with self.get_connection() as conn:
+ if not self._get_enabled(object_id, conn):
+ modlist = [(ldap.MOD_ADD,
+ 'member',
+ [self._id_to_dn(object_id)])]
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
except ldap.NO_SUCH_OBJECT:
@@ -1851,10 +1856,12 @@ class EnabledEmuMixIn(BaseLdap):
return super(EnabledEmuMixIn, self).create(values)
def get(self, object_id, ldap_filter=None):
- ref = super(EnabledEmuMixIn, self).get(object_id, ldap_filter)
- if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
- ref['enabled'] = self._get_enabled(object_id)
- return ref
+ with self.get_connection() as conn:
+ ref = super(EnabledEmuMixIn, self).get(object_id, ldap_filter)
+ if ('enabled' not in self.attribute_ignore and
+ self.enabled_emulation):
+ ref['enabled'] = self._get_enabled(object_id, conn)
+ return ref
def get_all(self, ldap_filter=None):
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
@@ -1862,8 +1869,10 @@ class EnabledEmuMixIn(BaseLdap):
tenant_list = [self._ldap_res_to_model(x)
for x in self._ldap_get_all(ldap_filter)
if x[0] != self.enabled_emulation_dn]
- for tenant_ref in tenant_list:
- tenant_ref['enabled'] = self._get_enabled(tenant_ref['id'])
+ with self.get_connection() as conn:
+ for tenant_ref in tenant_list:
+ tenant_ref['enabled'] = self._get_enabled(
+ tenant_ref['id'], conn)
return tenant_list
else:
return super(EnabledEmuMixIn, self).get_all(ldap_filter)
diff --git a/keystone-moon/keystone/common/manager.py b/keystone-moon/keystone/common/manager.py
index 28bf2efb..7150fbf3 100644
--- a/keystone-moon/keystone/common/manager.py
+++ b/keystone-moon/keystone/common/manager.py
@@ -14,7 +14,13 @@
import functools
+from oslo_log import log
+from oslo_log import versionutils
from oslo_utils import importutils
+import stevedore
+
+
+LOG = log.getLogger(__name__)
def response_truncated(f):
@@ -53,6 +59,28 @@ def response_truncated(f):
return wrapper
+def load_driver(namespace, driver_name, *args):
+ try:
+ driver_manager = stevedore.DriverManager(namespace,
+ driver_name,
+ invoke_on_load=True,
+ invoke_args=args)
+ return driver_manager.driver
+ except RuntimeError as e:
+ LOG.debug('Failed to load %r using stevedore: %s', driver_name, e)
+ # Ignore failure and continue on.
+
+ @versionutils.deprecated(as_of=versionutils.deprecated.LIBERTY,
+ in_favor_of='entrypoints',
+ what='direct import of driver')
+ def _load_using_import(driver_name, *args):
+ return importutils.import_object(driver_name, *args)
+
+ # For backwards-compatibility, an unregistered class reference can
+ # still be used.
+ return _load_using_import(driver_name, *args)
+
+
class Manager(object):
"""Base class for intermediary request layer.
@@ -66,8 +94,10 @@ class Manager(object):
"""
+ driver_namespace = None
+
def __init__(self, driver_name):
- self.driver = importutils.import_object(driver_name)
+ self.driver = load_driver(self.driver_namespace, driver_name)
def __getattr__(self, name):
"""Forward calls to the underlying driver."""
diff --git a/keystone-moon/keystone/common/models.py b/keystone-moon/keystone/common/models.py
index 3b3aabe1..0bb37319 100644
--- a/keystone-moon/keystone/common/models.py
+++ b/keystone-moon/keystone/common/models.py
@@ -130,11 +130,12 @@ class Project(Model):
Optional Keys:
description
enabled (bool, default True)
+ is_domain (bool, default False)
"""
required_keys = ('id', 'name', 'domain_id')
- optional_keys = ('description', 'enabled')
+ optional_keys = ('description', 'enabled', 'is_domain')
class Role(Model):
diff --git a/keystone-moon/keystone/common/openssl.py b/keystone-moon/keystone/common/openssl.py
index 4eb7d1d1..be56b9cc 100644
--- a/keystone-moon/keystone/common/openssl.py
+++ b/keystone-moon/keystone/common/openssl.py
@@ -20,7 +20,7 @@ from oslo_log import log
from keystone.common import environment
from keystone.common import utils
-from keystone.i18n import _LI, _LE
+from keystone.i18n import _LI, _LE, _LW
LOG = log.getLogger(__name__)
CONF = cfg.CONF
@@ -70,8 +70,8 @@ class BaseCertificateConfigure(object):
if "OpenSSL 0." in openssl_ver:
self.ssl_dictionary['default_md'] = 'sha1'
except OSError:
- LOG.warn('Failed to invoke ``openssl version``, '
- 'assuming is v1.0 or newer')
+ LOG.warn(_LW('Failed to invoke ``openssl version``, '
+ 'assuming is v1.0 or newer'))
self.ssl_dictionary.update(kwargs)
def exec_command(self, command):
diff --git a/keystone-moon/keystone/common/sql/core.py b/keystone-moon/keystone/common/sql/core.py
index bf168701..ebd61bb7 100644
--- a/keystone-moon/keystone/common/sql/core.py
+++ b/keystone-moon/keystone/common/sql/core.py
@@ -239,6 +239,39 @@ def truncated(f):
return wrapper
+class _WontMatch(Exception):
+ """Raised to indicate that the filter won't match.
+
+ This is raised to short-circuit the computation of the filter as soon as
+ it's discovered that the filter requested isn't going to match anything.
+
+ A filter isn't going to match anything if the value is too long for the
+ field, for example.
+
+ """
+
+ @classmethod
+ def check(cls, value, col_attr):
+ """Check if the value can match given the column attributes.
+
+ Raises this class if the value provided can't match any value in the
+ column in the table given the column's attributes. For example, if the
+ column is a string and the value is longer than the column then it
+ won't match any value in the column in the table.
+
+ """
+ col = col_attr.property.columns[0]
+ if isinstance(col.type, sql.types.Boolean):
+ # The column is a Boolean, we should have already validated input.
+ return
+ if not col.type.length:
+ # The column doesn't have a length so can't validate anymore.
+ return
+ if len(value) > col.type.length:
+ raise cls()
+ # Otherwise the value could match a value in the column.
+
+
def _filter(model, query, hints):
"""Applies filtering to a query.
@@ -251,16 +284,14 @@ def _filter(model, query, hints):
:returns query: query, updated with any filters satisfied
"""
- def inexact_filter(model, query, filter_, satisfied_filters, hints):
+ def inexact_filter(model, query, filter_, satisfied_filters):
"""Applies an inexact filter to a query.
:param model: the table model in question
:param query: query to apply filters to
- :param filter_: the dict that describes this filter
- :param satisfied_filters: a cumulative list of satisfied filters, to
- which filter_ will be added if it is
- satisfied.
- :param hints: contains the list of filters yet to be satisfied.
+ :param dict filter_: describes this filter
+ :param list satisfied_filters: filter_ will be added if it is
+ satisfied.
:returns query: query updated to add any inexact filters we could
satisfy
@@ -278,10 +309,13 @@ def _filter(model, query, hints):
return query
if filter_['comparator'] == 'contains':
+ _WontMatch.check(filter_['value'], column_attr)
query_term = column_attr.ilike('%%%s%%' % filter_['value'])
elif filter_['comparator'] == 'startswith':
+ _WontMatch.check(filter_['value'], column_attr)
query_term = column_attr.ilike('%s%%' % filter_['value'])
elif filter_['comparator'] == 'endswith':
+ _WontMatch.check(filter_['value'], column_attr)
query_term = column_attr.ilike('%%%s' % filter_['value'])
else:
# It's a filter we don't understand, so let the caller
@@ -291,53 +325,50 @@ def _filter(model, query, hints):
satisfied_filters.append(filter_)
return query.filter(query_term)
- def exact_filter(
- model, filter_, satisfied_filters, cumulative_filter_dict, hints):
+ def exact_filter(model, filter_, cumulative_filter_dict):
"""Applies an exact filter to a query.
:param model: the table model in question
- :param filter_: the dict that describes this filter
- :param satisfied_filters: a cumulative list of satisfied filters, to
- which filter_ will be added if it is
- satisfied.
- :param cumulative_filter_dict: a dict that describes the set of
- exact filters built up so far
- :param hints: contains the list of filters yet to be satisfied.
-
- :returns: updated cumulative dict
+ :param dict filter_: describes this filter
+ :param dict cumulative_filter_dict: describes the set of exact filters
+ built up so far
"""
key = filter_['name']
- if isinstance(getattr(model, key).property.columns[0].type,
- sql.types.Boolean):
+
+ col = getattr(model, key)
+ if isinstance(col.property.columns[0].type, sql.types.Boolean):
cumulative_filter_dict[key] = (
utils.attr_as_boolean(filter_['value']))
else:
+ _WontMatch.check(filter_['value'], col)
cumulative_filter_dict[key] = filter_['value']
- satisfied_filters.append(filter_)
- return cumulative_filter_dict
-
- filter_dict = {}
- satisfied_filters = []
- for filter_ in hints.filters:
- if filter_['name'] not in model.attributes:
- continue
- if filter_['comparator'] == 'equals':
- filter_dict = exact_filter(
- model, filter_, satisfied_filters, filter_dict, hints)
- else:
- query = inexact_filter(
- model, query, filter_, satisfied_filters, hints)
- # Apply any exact filters we built up
- if filter_dict:
- query = query.filter_by(**filter_dict)
+ try:
+ filter_dict = {}
+ satisfied_filters = []
+ for filter_ in hints.filters:
+ if filter_['name'] not in model.attributes:
+ continue
+ if filter_['comparator'] == 'equals':
+ exact_filter(model, filter_, filter_dict)
+ satisfied_filters.append(filter_)
+ else:
+ query = inexact_filter(model, query, filter_,
+ satisfied_filters)
+
+ # Apply any exact filters we built up
+ if filter_dict:
+ query = query.filter_by(**filter_dict)
+
+ # Remove satisfied filters, then the caller will know remaining filters
+ for filter_ in satisfied_filters:
+ hints.filters.remove(filter_)
- # Remove satisfied filters, then the caller will know remaining filters
- for filter_ in satisfied_filters:
- hints.filters.remove(filter_)
-
- return query
+ return query
+ except _WontMatch:
+ hints.cannot_match = True
+ return
def _limit(query, hints):
@@ -378,6 +409,10 @@ def filter_limit_query(model, query, hints):
# First try and satisfy any filters
query = _filter(model, query, hints)
+ if hints.cannot_match:
+ # Nothing's going to match, so don't bother with the query.
+ return []
+
# NOTE(henry-nash): Any unsatisfied filters will have been left in
# the hints list for the controller to handle. We can only try and
# limit here if all the filters are already satisfied since, if not,
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py
index b6f40719..2a98fb90 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py
@@ -19,7 +19,3 @@
def upgrade(migrate_engine):
pass
-
-
-def downgrade(migration_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py
index b6f40719..2a98fb90 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py
@@ -19,7 +19,3 @@
def upgrade(migrate_engine):
pass
-
-
-def downgrade(migration_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py
index b6f40719..2a98fb90 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py
@@ -19,7 +19,3 @@
def upgrade(migrate_engine):
pass
-
-
-def downgrade(migration_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/048_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/048_placeholder.py
index b6f40719..2a98fb90 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/048_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/048_placeholder.py
@@ -19,7 +19,3 @@
def upgrade(migrate_engine):
pass
-
-
-def downgrade(migration_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py
index b6f40719..2a98fb90 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py
@@ -19,7 +19,3 @@
def upgrade(migrate_engine):
pass
-
-
-def downgrade(migration_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py
index 535a0944..c4b41580 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py
@@ -27,7 +27,8 @@ def upgrade(migrate_engine):
# names, depending on version of MySQL used. We shoud make this naming
# consistent, by reverting index name to a consistent condition.
if any(i for i in endpoint.indexes if
- i.columns.keys() == ['service_id'] and i.name != 'service_id'):
+ list(i.columns.keys()) == ['service_id']
+ and i.name != 'service_id'):
# NOTE(i159): by this action will be made re-creation of an index
# with the new name. This can be considered as renaming under the
# MySQL rules.
@@ -37,13 +38,6 @@ def upgrade(migrate_engine):
meta, autoload=True)
if any(i for i in user_group_membership.indexes if
- i.columns.keys() == ['group_id'] and i.name != 'group_id'):
+ list(i.columns.keys()) == ['group_id']
+ and i.name != 'group_id'):
sa.Index('group_id', user_group_membership.c.group_id).create()
-
-
-def downgrade(migrate_engine):
- # NOTE(i159): index exists only in MySQL schemas, and got an inconsistent
- # name only when MySQL 5.5 renamed it after re-creation
- # (during migrations). So we just fixed inconsistency, there is no
- # necessity to revert it.
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py
index 074fbb63..59720f6e 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py
@@ -39,11 +39,3 @@ def upgrade(migrate_engine):
mysql_engine='InnoDB',
mysql_charset='utf8')
mapping_table.create(migrate_engine, checkfirst=True)
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- assignment = sql.Table(MAPPING_TABLE, meta, autoload=True)
- assignment.drop(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/052_add_auth_url_to_region.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/052_add_auth_url_to_region.py
index 9f1fd9f0..86302a8f 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/052_add_auth_url_to_region.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/052_add_auth_url_to_region.py
@@ -14,6 +14,7 @@
import sqlalchemy as sql
+
_REGION_TABLE_NAME = 'region'
@@ -24,11 +25,3 @@ def upgrade(migrate_engine):
region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
url_column = sql.Column('url', sql.String(255), nullable=True)
region_table.create_column(url_column)
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
- region_table.drop_column('url')
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py
index 6dc0004f..c2be48f4 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py
@@ -12,7 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-
"""Migrated the endpoint 'region' column to 'region_id.
In addition to the rename, the new column is made a foreign key to the
@@ -36,25 +35,9 @@ b. For each endpoint
ii. Assign the id to the region_id column
c. Remove the column region
-
-To Downgrade:
-
-Endpoint Table
-
-a. Add back in the region column
-b. For each endpoint
- i. Copy the region_id column to the region column
-c. Remove the column region_id
-
-Region Table
-
-Decrease the size of the id column in the region table, making sure that
-we don't get classing primary keys.
-
"""
import migrate
-import six
import sqlalchemy as sql
from sqlalchemy.orm import sessionmaker
@@ -90,39 +73,6 @@ def _migrate_to_region_id(migrate_engine, region_table, endpoint_table):
name='fk_endpoint_region_id').create()
-def _migrate_to_region(migrate_engine, region_table, endpoint_table):
- endpoints = list(endpoint_table.select().execute())
-
- for endpoint in endpoints:
- new_values = {'region': endpoint.region_id}
- f = endpoint_table.c.id == endpoint.id
- update = endpoint_table.update().where(f).values(new_values)
- migrate_engine.execute(update)
-
- if 'sqlite' != migrate_engine.name:
- migrate.ForeignKeyConstraint(
- columns=[endpoint_table.c.region_id],
- refcolumns=[region_table.c.id],
- name='fk_endpoint_region_id').drop()
- endpoint_table.c.region_id.drop()
-
-
-def _prepare_regions_for_id_truncation(migrate_engine, region_table):
- """Ensure there are no IDs that are bigger than 64 chars.
-
- The size of the id and parent_id fields where increased from 64 to 255
- during the upgrade. On downgrade we have to make sure that the ids can
- fit in the new column size. For rows with ids greater than this, we have
- no choice but to dump them.
-
- """
- for region in list(region_table.select().execute()):
- if (len(six.text_type(region.id)) > 64 or
- len(six.text_type(region.parent_region_id)) > 64):
- delete = region_table.delete(region_table.c.id == region.id)
- migrate_engine.execute(delete)
-
-
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
@@ -138,19 +88,3 @@ def upgrade(migrate_engine):
_migrate_to_region_id(migrate_engine, region_table, endpoint_table)
endpoint_table.c.region.drop()
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- region_table = sql.Table('region', meta, autoload=True)
- endpoint_table = sql.Table('endpoint', meta, autoload=True)
- region_column = sql.Column('region', sql.String(length=255))
- region_column.create(endpoint_table)
-
- _migrate_to_region(migrate_engine, region_table, endpoint_table)
- _prepare_regions_for_id_truncation(migrate_engine, region_table)
-
- region_table.c.id.alter(type=sql.String(length=64))
- region_table.c.parent_region_id.alter(type=sql.String(length=64))
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py
index 33b13b7d..caf4d66f 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py
@@ -14,6 +14,7 @@
import sqlalchemy as sql
+
ASSIGNMENT_TABLE = 'assignment'
@@ -24,12 +25,3 @@ def upgrade(migrate_engine):
assignment = sql.Table(ASSIGNMENT_TABLE, meta, autoload=True)
idx = sql.Index('ix_actor_id', assignment.c.actor_id)
idx.create(migrate_engine)
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- assignment = sql.Table(ASSIGNMENT_TABLE, meta, autoload=True)
- idx = sql.Index('ix_actor_id', assignment.c.actor_id)
- idx.drop(migrate_engine)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py
index 1cfddd3f..a7f327ea 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py
@@ -23,13 +23,3 @@ def upgrade(migrate_engine):
sql.Index('ix_token_user_id', token.c.user_id).create()
sql.Index('ix_token_trust_id', token.c.trust_id).create()
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- token = sql.Table('token', meta, autoload=True)
-
- sql.Index('ix_token_user_id', token.c.user_id).drop()
- sql.Index('ix_token_trust_id', token.c.trust_id).drop()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/056_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/056_placeholder.py
index 5f82254f..8bb40490 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/056_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/056_placeholder.py
@@ -16,7 +16,3 @@
def upgrade(migrate_engine):
pass
-
-
-def downgrade(migration_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/057_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/057_placeholder.py
index 5f82254f..8bb40490 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/057_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/057_placeholder.py
@@ -16,7 +16,3 @@
def upgrade(migrate_engine):
pass
-
-
-def downgrade(migration_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/058_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/058_placeholder.py
index 5f82254f..8bb40490 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/058_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/058_placeholder.py
@@ -16,7 +16,3 @@
def upgrade(migrate_engine):
pass
-
-
-def downgrade(migration_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/059_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/059_placeholder.py
index 5f82254f..8bb40490 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/059_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/059_placeholder.py
@@ -16,7 +16,3 @@
def upgrade(migrate_engine):
pass
-
-
-def downgrade(migration_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py
index 5f82254f..8bb40490 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py
@@ -16,7 +16,3 @@
def upgrade(migrate_engine):
pass
-
-
-def downgrade(migration_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py
index bb8ef9f6..ca9b3ce2 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py
@@ -14,6 +14,7 @@ import sqlalchemy as sql
from keystone.common.sql import migration_helpers
+
_PROJECT_TABLE_NAME = 'project'
_PARENT_ID_COLUMN_NAME = 'parent_id'
@@ -38,17 +39,3 @@ def upgrade(migrate_engine):
if migrate_engine.name == 'sqlite':
return
migration_helpers.add_constraints(list_constraints(project_table))
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
-
- # SQLite does not support constraints, and querying the constraints
- # raises an exception
- if migrate_engine.name != 'sqlite':
- migration_helpers.remove_constraints(list_constraints(project_table))
-
- project_table.drop_column(_PARENT_ID_COLUMN_NAME)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py
index 5a33486c..f7a69bb6 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py
@@ -33,9 +33,3 @@ def upgrade(migrate_engine):
if migrate_engine.name == 'sqlite':
return
migration_helpers.remove_constraints(list_constraints(migrate_engine))
-
-
-def downgrade(migrate_engine):
- if migrate_engine.name == 'sqlite':
- return
- migration_helpers.add_constraints(list_constraints(migrate_engine))
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/063_drop_region_auth_url.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/063_drop_region_auth_url.py
index 109a8412..e45133ab 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/063_drop_region_auth_url.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/063_drop_region_auth_url.py
@@ -12,6 +12,7 @@
import sqlalchemy as sql
+
_REGION_TABLE_NAME = 'region'
@@ -21,12 +22,3 @@ def upgrade(migrate_engine):
region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
region_table.drop_column('url')
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
- url_column = sql.Column('url', sql.String(255), nullable=True)
- region_table.create_column(url_column)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py
index bca00902..637f2151 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py
@@ -37,9 +37,3 @@ def upgrade(migrate_engine):
if migrate_engine.name == 'sqlite':
return
migration_helpers.remove_constraints(list_constraints(migrate_engine))
-
-
-def downgrade(migrate_engine):
- if migrate_engine.name == 'sqlite':
- return
- migration_helpers.add_constraints(list_constraints(migrate_engine))
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py
index fd8717d2..63a86c11 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py
@@ -14,6 +14,7 @@ import sqlalchemy as sql
from keystone.common import sql as ks_sql
+
WHITELIST_TABLE = 'whitelisted_config'
SENSITIVE_TABLE = 'sensitive_config'
@@ -43,13 +44,3 @@ def upgrade(migrate_engine):
mysql_engine='InnoDB',
mysql_charset='utf8')
sensitive_table.create(migrate_engine, checkfirst=True)
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- table = sql.Table(WHITELIST_TABLE, meta, autoload=True)
- table.drop(migrate_engine, checkfirst=True)
- table = sql.Table(SENSITIVE_TABLE, meta, autoload=True)
- table.drop(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py
index 3feadc53..fe0cee88 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py
@@ -22,7 +22,11 @@ def upgrade(migrate_engine):
services = list(service_table.select().execute())
for service in services:
- extra_dict = jsonutils.loads(service.extra)
+ if service.extra is not None:
+ extra_dict = jsonutils.loads(service.extra)
+ else:
+ extra_dict = {}
+
# Skip records where service is not null
if extra_dict.get('name') is not None:
continue
@@ -34,10 +38,3 @@ def upgrade(migrate_engine):
f = service_table.c.id == service.id
update = service_table.update().where(f).values(new_values)
migrate_engine.execute(update)
-
-
-def downgrade(migration_engine):
- # The upgrade fixes the data inconsistency for the service name,
- # it defaults the value to empty string. There is no necessity
- # to revert it.
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/067_drop_redundant_mysql_index.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/067_drop_redundant_mysql_index.py
new file mode 100644
index 00000000..b9df1a55
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/067_drop_redundant_mysql_index.py
@@ -0,0 +1,25 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy
+
+
+def upgrade(migrate_engine):
+ # NOTE(viktors): Migration 062 removed FK from `assignment` table, but
+ # MySQL silently creates indexes on FK constraints, so we should remove
+ # this index manually.
+ if migrate_engine.name == 'mysql':
+ meta = sqlalchemy.MetaData(bind=migrate_engine)
+ table = sqlalchemy.Table('assignment', meta, autoload=True)
+ for index in table.indexes:
+ if [c.name for c in index.columns] == ['role_id']:
+ index.drop(migrate_engine)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/068_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/068_placeholder.py
new file mode 100644
index 00000000..111df9d4
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/068_placeholder.py
@@ -0,0 +1,18 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for Kilo backports. Do not use this number for new
+# Liberty work. New Liberty work starts after all the placeholders.
+
+
+def upgrade(migrate_engine):
+ pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/069_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/069_placeholder.py
new file mode 100644
index 00000000..111df9d4
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/069_placeholder.py
@@ -0,0 +1,18 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for Kilo backports. Do not use this number for new
+# Liberty work. New Liberty work starts after all the placeholders.
+
+
+def upgrade(migrate_engine):
+ pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/070_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/070_placeholder.py
new file mode 100644
index 00000000..111df9d4
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/070_placeholder.py
@@ -0,0 +1,18 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for Kilo backports. Do not use this number for new
+# Liberty work. New Liberty work starts after all the placeholders.
+
+
+def upgrade(migrate_engine):
+ pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/071_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/071_placeholder.py
new file mode 100644
index 00000000..111df9d4
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/071_placeholder.py
@@ -0,0 +1,18 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for Kilo backports. Do not use this number for new
+# Liberty work. New Liberty work starts after all the placeholders.
+
+
+def upgrade(migrate_engine):
+ pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/072_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/072_placeholder.py
new file mode 100644
index 00000000..111df9d4
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/072_placeholder.py
@@ -0,0 +1,18 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for Kilo backports. Do not use this number for new
+# Liberty work. New Liberty work starts after all the placeholders.
+
+
+def upgrade(migrate_engine):
+ pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py
new file mode 100644
index 00000000..ffa210c4
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py
@@ -0,0 +1,114 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import migrate
+import sqlalchemy as sql
+from sqlalchemy.orm import sessionmaker
+
+from keystone.assignment.backends import sql as assignment_sql
+
+
+def upgrade(migrate_engine):
+    """Insert inherited column to assignment table PK constraints.
+
+ For non-SQLite databases, it changes the constraint in the existing table.
+
+ For SQLite, since changing constraints is not supported, it recreates the
+ assignment table with the new PK constraint and migrates the existing data.
+
+ """
+
+ ASSIGNMENT_TABLE_NAME = 'assignment'
+
+ metadata = sql.MetaData()
+ metadata.bind = migrate_engine
+
+ # Retrieve the existing assignment table
+ assignment_table = sql.Table(ASSIGNMENT_TABLE_NAME, metadata,
+ autoload=True)
+
+ if migrate_engine.name == 'sqlite':
+ ACTOR_ID_INDEX_NAME = 'ix_actor_id'
+ TMP_ASSIGNMENT_TABLE_NAME = 'tmp_assignment'
+
+ # Define the new assignment table with a temporary name
+ new_assignment_table = sql.Table(
+ TMP_ASSIGNMENT_TABLE_NAME, metadata,
+ sql.Column('type', sql.Enum(
+ assignment_sql.AssignmentType.USER_PROJECT,
+ assignment_sql.AssignmentType.GROUP_PROJECT,
+ assignment_sql.AssignmentType.USER_DOMAIN,
+ assignment_sql.AssignmentType.GROUP_DOMAIN,
+ name='type'),
+ nullable=False),
+ sql.Column('actor_id', sql.String(64), nullable=False),
+ sql.Column('target_id', sql.String(64), nullable=False),
+ sql.Column('role_id', sql.String(64), sql.ForeignKey('role.id'),
+ nullable=False),
+ sql.Column('inherited', sql.Boolean, default=False,
+ nullable=False),
+ sql.PrimaryKeyConstraint('type', 'actor_id', 'target_id',
+ 'role_id', 'inherited'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ # Create the new assignment table
+ new_assignment_table.create(migrate_engine, checkfirst=True)
+
+ # Change the index from the existing assignment table to the new one
+ sql.Index(ACTOR_ID_INDEX_NAME, assignment_table.c.actor_id).drop()
+ sql.Index(ACTOR_ID_INDEX_NAME,
+ new_assignment_table.c.actor_id).create()
+
+ # Instantiate session
+ maker = sessionmaker(bind=migrate_engine)
+ session = maker()
+
+ # Migrate existing data
+ insert = new_assignment_table.insert().from_select(
+ assignment_table.c, select=session.query(assignment_table))
+ session.execute(insert)
+ session.commit()
+
+ # Drop the existing assignment table, in favor of the new one
+ assignment_table.deregister()
+ assignment_table.drop()
+
+ # Finally, rename the new table to the original assignment table name
+ new_assignment_table.rename(ASSIGNMENT_TABLE_NAME)
+ elif migrate_engine.name == 'ibm_db_sa':
+ # Recreate the existing constraint, marking the inherited column as PK
+ # for DB2.
+
+ # This is a workaround to the general case in the else statement below.
+ # Due to a bug in the DB2 sqlalchemy dialect, Column.alter() actually
+ # creates a primary key over only the "inherited" column. This is wrong
+ # because the primary key for the table actually covers other columns
+ # too, not just the "inherited" column. Since the primary key already
+ # exists for the table after the Column.alter() call, it causes the
+ # next line to fail with an error that the primary key already exists.
+
+ # The workaround here skips doing the Column.alter(). This causes a
+ # warning message since the metadata is out of sync. We can remove this
+ # workaround once the DB2 sqlalchemy dialect is fixed.
+ # DB2 Issue: https://code.google.com/p/ibm-db/issues/detail?id=173
+
+ migrate.PrimaryKeyConstraint(table=assignment_table).drop()
+ migrate.PrimaryKeyConstraint(
+ assignment_table.c.type, assignment_table.c.actor_id,
+ assignment_table.c.target_id, assignment_table.c.role_id,
+ assignment_table.c.inherited).create()
+ else:
+ # Recreate the existing constraint, marking the inherited column as PK
+ migrate.PrimaryKeyConstraint(table=assignment_table).drop()
+ assignment_table.c.inherited.alter(primary_key=True)
+ migrate.PrimaryKeyConstraint(table=assignment_table).create()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/074_add_is_domain_project.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/074_add_is_domain_project.py
new file mode 100644
index 00000000..dcb89b07
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/074_add_is_domain_project.py
@@ -0,0 +1,27 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+
+_PROJECT_TABLE_NAME = 'project'
+_IS_DOMAIN_COLUMN_NAME = 'is_domain'
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
+ is_domain = sql.Column(_IS_DOMAIN_COLUMN_NAME, sql.Boolean, nullable=False,
+ server_default='0', default=False)
+ project_table.create_column(is_domain)
diff --git a/keystone-moon/keystone/common/sql/migration_helpers.py b/keystone-moon/keystone/common/sql/migration_helpers.py
index 86932995..aaa59f70 100644
--- a/keystone-moon/keystone/common/sql/migration_helpers.py
+++ b/keystone-moon/keystone/common/sql/migration_helpers.py
@@ -143,37 +143,21 @@ def _sync_common_repo(version):
abs_path = find_migrate_repo()
init_version = migrate_repo.DB_INIT_VERSION
engine = sql.get_engine()
+ _assert_not_schema_downgrade(version=version)
migration.db_sync(engine, abs_path, version=version,
- init_version=init_version)
+ init_version=init_version, sanity_check=False)
-def _fix_federation_tables(engine):
- """Fix the identity_provider, federation_protocol and mapping tables
- to be InnoDB and Charset UTF8.
-
- This function is to work around bug #1426334. This has occurred because
- the original migration did not specify InnoDB and charset utf8. Due
- to the sanity_check, a deployer can get wedged here and require manual
- database changes to fix.
- """
- # NOTE(marco-fargetta) This is a workaround to "fix" that tables only
- # if we're under MySQL
- if engine.name == 'mysql':
- # * Disable any check for the foreign keys because they prevent the
- # alter table to execute
- engine.execute("SET foreign_key_checks = 0")
- # * Make the tables using InnoDB engine
- engine.execute("ALTER TABLE identity_provider Engine=InnoDB")
- engine.execute("ALTER TABLE federation_protocol Engine=InnoDB")
- engine.execute("ALTER TABLE mapping Engine=InnoDB")
- # * Make the tables using utf8 encoding
- engine.execute("ALTER TABLE identity_provider "
- "CONVERT TO CHARACTER SET utf8")
- engine.execute("ALTER TABLE federation_protocol "
- "CONVERT TO CHARACTER SET utf8")
- engine.execute("ALTER TABLE mapping CONVERT TO CHARACTER SET utf8")
- # * Revert the foreign keys check back
- engine.execute("SET foreign_key_checks = 1")
+def _assert_not_schema_downgrade(extension=None, version=None):
+ if version is not None:
+ try:
+ current_ver = int(six.text_type(get_db_version(extension)))
+ if int(version) < current_ver:
+ raise migration.exception.DbMigrationError()
+ except exceptions.DatabaseNotControlledError:
+ # NOTE(morganfainberg): The database is not controlled, this action
+ # cannot be a downgrade.
+ pass
def _sync_extension_repo(extension, version):
@@ -198,27 +182,11 @@ def _sync_extension_repo(extension, version):
except exception.MigrationNotProvided as e:
print(e)
sys.exit(1)
- try:
- migration.db_sync(engine, abs_path, version=version,
- init_version=init_version)
- except ValueError:
- # NOTE(marco-fargetta): ValueError is raised from the sanity check (
- # verifies that tables are utf8 under mysql). The federation_protocol,
- # identity_provider and mapping tables were not initially built with
- # InnoDB and utf8 as part of the table arguments when the migration
- # was initially created. Bug #1426334 is a scenario where the deployer
- # can get wedged, unable to upgrade or downgrade.
- # This is a workaround to "fix" those tables if we're under MySQL and
- # the version is before the 6 because before the tables were introduced
- # before and patched when migration 5 was available
- if engine.name == 'mysql' and \
- int(six.text_type(get_db_version(extension))) < 6:
- _fix_federation_tables(engine)
- # The migration is applied again after the fix
- migration.db_sync(engine, abs_path, version=version,
- init_version=init_version)
- else:
- raise
+
+ _assert_not_schema_downgrade(extension=extension, version=version)
+
+ migration.db_sync(engine, abs_path, version=version,
+ init_version=init_version, sanity_check=False)
def sync_database_to_version(extension=None, version=None):
diff --git a/keystone-moon/keystone/common/utils.py b/keystone-moon/keystone/common/utils.py
index a4b03ffd..48336af7 100644
--- a/keystone-moon/keystone/common/utils.py
+++ b/keystone-moon/keystone/common/utils.py
@@ -27,10 +27,12 @@ from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import strutils
+from oslo_utils import timeutils
import passlib.hash
import six
from six import moves
+from keystone.common import authorization
from keystone import exception
from keystone.i18n import _, _LE, _LW
@@ -51,7 +53,7 @@ def flatten_dict(d, parent_key=''):
for k, v in d.items():
new_key = parent_key + '.' + k if parent_key else k
if isinstance(v, collections.MutableMapping):
- items.extend(flatten_dict(v, new_key).items())
+ items.extend(list(flatten_dict(v, new_key).items()))
else:
items.append((new_key, v))
return dict(items)
@@ -244,7 +246,7 @@ def setup_remote_pydev_debug():
def get_unix_user(user=None):
- '''Get the uid and user name.
+ """Get the uid and user name.
This is a convenience utility which accepts a variety of input
which might represent a unix user. If successful it returns the uid
@@ -257,7 +259,7 @@ def get_unix_user(user=None):
lookup as a uid.
int
- An integer is interpretted as a uid.
+ An integer is interpreted as a uid.
None
None is interpreted to mean use the current process's
@@ -270,7 +272,8 @@ def get_unix_user(user=None):
lookup.
:return: tuple of (uid, name)
- '''
+
+ """
if isinstance(user, six.string_types):
try:
@@ -299,7 +302,7 @@ def get_unix_user(user=None):
def get_unix_group(group=None):
- '''Get the gid and group name.
+ """Get the gid and group name.
This is a convenience utility which accepts a variety of input
which might represent a unix group. If successful it returns the gid
@@ -312,7 +315,7 @@ def get_unix_group(group=None):
lookup as a gid.
int
- An integer is interpretted as a gid.
+ An integer is interpreted as a gid.
None
None is interpreted to mean use the current process's
@@ -326,7 +329,8 @@ def get_unix_group(group=None):
lookup.
:return: tuple of (gid, name)
- '''
+
+ """
if isinstance(group, six.string_types):
try:
@@ -357,7 +361,7 @@ def get_unix_group(group=None):
def set_permissions(path, mode=None, user=None, group=None, log=None):
- '''Set the ownership and permissions on the pathname.
+ """Set the ownership and permissions on the pathname.
Each of the mode, user and group are optional, if None then
that aspect is not modified.
@@ -374,7 +378,8 @@ def set_permissions(path, mode=None, user=None, group=None, log=None):
if None do not set.
:param logger log: logging.logger object, used to emit log messages,
if None no logging is performed.
- '''
+
+ """
if user is None:
user_uid, user_name = None, None
@@ -420,7 +425,7 @@ def set_permissions(path, mode=None, user=None, group=None, log=None):
def make_dirs(path, mode=None, user=None, group=None, log=None):
- '''Assure directory exists, set ownership and permissions.
+ """Assure directory exists, set ownership and permissions.
Assure the directory exists and optionally set its ownership
and permissions.
@@ -440,7 +445,8 @@ def make_dirs(path, mode=None, user=None, group=None, log=None):
if None do not set.
:param logger log: logging.logger object, used to emit log messages,
if None no logging is performed.
- '''
+
+ """
if log:
if mode is None:
@@ -469,3 +475,54 @@ class WhiteListedItemFilter(object):
if name not in self._whitelist:
raise KeyError
return self._data[name]
+
+
+_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
+_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
+
+
+def isotime(at=None, subsecond=False):
+ """Stringify time in ISO 8601 format."""
+
+ # Python provides a similar instance method for datetime.datetime objects
+ # called isoformat(). The format of the strings generated by isoformat()
+ # have a couple of problems:
+ # 1) The strings generated by isotime are used in tokens and other public
+ # APIs that we can't change without a deprecation period. The strings
+ # generated by isoformat are not the same format, so we can't just
+ # change to it.
+ # 2) The strings generated by isoformat do not include the microseconds if
+ # the value happens to be 0. This will likely show up as random failures
+ # as parsers may be written to always expect microseconds, and it will
+ # parse correctly most of the time.
+
+ if not at:
+ at = timeutils.utcnow()
+ st = at.strftime(_ISO8601_TIME_FORMAT
+ if not subsecond
+ else _ISO8601_TIME_FORMAT_SUBSECOND)
+ tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
+ st += ('Z' if tz == 'UTC' else tz)
+ return st
+
+
+def strtime():
+ at = timeutils.utcnow()
+ return at.strftime(timeutils.PERFECT_TIME_FORMAT)
+
+
+def get_token_ref(context):
+ """Retrieves KeystoneToken object from the auth context and returns it.
+
+ :param dict context: The request context.
+ :raises: exception.Unauthorized if auth context cannot be found.
+ :returns: The KeystoneToken object.
+ """
+ try:
+ # Retrieve the auth context that was prepared by AuthContextMiddleware.
+ auth_context = (context['environment']
+ [authorization.AUTH_CONTEXT_ENV])
+ return auth_context['token']
+ except KeyError:
+ LOG.warning(_LW("Couldn't find the auth context."))
+ raise exception.Unauthorized()
diff --git a/keystone-moon/keystone/common/validation/__init__.py b/keystone-moon/keystone/common/validation/__init__.py
index f9c58eaf..1e5cc6a5 100644
--- a/keystone-moon/keystone/common/validation/__init__.py
+++ b/keystone-moon/keystone/common/validation/__init__.py
@@ -12,8 +12,11 @@
"""Request body validating middleware for OpenStack Identity resources."""
import functools
+import inspect
from keystone.common.validation import validators
+from keystone import exception
+from keystone.i18n import _
def validated(request_body_schema, resource_to_validate):
@@ -24,15 +27,47 @@ def validated(request_body_schema, resource_to_validate):
:param request_body_schema: a schema to validate the resource reference
:param resource_to_validate: the reference to validate
+ :raises keystone.exception.ValidationError: if `resource_to_validate` is
+ not passed by or passed with an empty value (see wrapper method
+ below).
+ :raises TypeError: at decoration time when the expected resource to
+ validate isn't found in the decorated method's
+ signature
"""
schema_validator = validators.SchemaValidator(request_body_schema)
def add_validator(func):
+ argspec = inspect.getargspec(func)
+ try:
+ arg_index = argspec.args.index(resource_to_validate)
+ except ValueError:
+ raise TypeError(_('validated expected to find %(param_name)r in '
+ 'function signature for %(func_name)r.') %
+ {'param_name': resource_to_validate,
+ 'func_name': func.__name__})
+
@functools.wraps(func)
def wrapper(*args, **kwargs):
- if resource_to_validate in kwargs:
+ if kwargs.get(resource_to_validate):
schema_validator.validate(kwargs[resource_to_validate])
+ else:
+ try:
+ resource = args[arg_index]
+ # If resource to be validated is empty, no need to do
+ # validation since the message given by jsonschema doesn't
+ # help in this case.
+ if resource:
+ schema_validator.validate(resource)
+ else:
+ raise exception.ValidationError(
+ attribute=resource_to_validate,
+ target='request body')
+ # We cannot find the resource in either kwargs or args.
+ except IndexError:
+ raise exception.ValidationError(
+ attribute=resource_to_validate,
+ target='request body')
return func(*args, **kwargs)
return wrapper
return add_validator
diff --git a/keystone-moon/keystone/common/validation/parameter_types.py b/keystone-moon/keystone/common/validation/parameter_types.py
index c5908836..1bc81383 100644
--- a/keystone-moon/keystone/common/validation/parameter_types.py
+++ b/keystone-moon/keystone/common/validation/parameter_types.py
@@ -28,6 +28,12 @@ name = {
'maxLength': 255
}
+external_id_string = {
+ 'type': 'string',
+ 'minLength': 1,
+ 'maxLength': 64
+}
+
id_string = {
'type': 'string',
'minLength': 1,
diff --git a/keystone-moon/keystone/common/wsgi.py b/keystone-moon/keystone/common/wsgi.py
index 6ee8150d..0dee954b 100644
--- a/keystone-moon/keystone/common/wsgi.py
+++ b/keystone-moon/keystone/common/wsgi.py
@@ -20,7 +20,7 @@
import copy
import itertools
-import urllib
+import wsgiref.util
from oslo_config import cfg
import oslo_i18n
@@ -49,10 +49,12 @@ LOG = log.getLogger(__name__)
# Environment variable used to pass the request context
CONTEXT_ENV = 'openstack.context'
-
# Environment variable used to pass the request params
PARAMS_ENV = 'openstack.params'
+JSON_ENCODE_CONTENT_TYPES = set(['application/json',
+ 'application/json-home'])
+
def validate_token_bind(context, token_ref):
bind_mode = CONF.token.enforce_token_bind
@@ -84,7 +86,7 @@ def validate_token_bind(context, token_ref):
LOG.info(_LI("Named bind mode %s not in bind information"), name)
raise exception.Unauthorized()
- for bind_type, identifier in six.iteritems(bind):
+ for bind_type, identifier in bind.items():
if bind_type == 'kerberos':
if not (context['environment'].get('AUTH_TYPE', '').lower()
== 'negotiate'):
@@ -195,8 +197,16 @@ class Application(BaseApplication):
# allow middleware up the stack to provide context, params and headers.
context = req.environ.get(CONTEXT_ENV, {})
- context['query_string'] = dict(six.iteritems(req.params))
- context['headers'] = dict(six.iteritems(req.headers))
+
+ try:
+ context['query_string'] = dict(req.params.items())
+ except UnicodeDecodeError as e:
+ # The webob package throws UnicodeError when a request cannot be
+ # decoded. Raise ValidationError instead to avoid an UnknownError.
+ msg = _('Query string is not UTF-8 encoded')
+ raise exception.ValidationError(msg)
+
+ context['headers'] = dict(req.headers.items())
context['path'] = req.environ['PATH_INFO']
scheme = (None if not CONF.secure_proxy_ssl_header
else req.environ.get(CONF.secure_proxy_ssl_header))
@@ -211,8 +221,8 @@ class Application(BaseApplication):
context['host_url'] = req.host_url
params = req.environ.get(PARAMS_ENV, {})
# authentication and authorization attributes are set as environment
- # values by the container and processed by the pipeline. the complete
- # set is not yet know.
+ # values by the container and processed by the pipeline. The complete
+ # set is not yet known.
context['environment'] = req.environ
context['accept_header'] = req.accept
req.environ = None
@@ -227,11 +237,10 @@ class Application(BaseApplication):
# NOTE(morganfainberg): use the request method to normalize the
# response code between GET and HEAD requests. The HTTP status should
# be the same.
- req_method = req.environ['REQUEST_METHOD'].upper()
- LOG.info('%(req_method)s %(path)s?%(params)s', {
- 'req_method': req_method,
- 'path': context['path'],
- 'params': urllib.urlencode(req.params)})
+ LOG.info('%(req_method)s %(uri)s', {
+ 'req_method': req.environ['REQUEST_METHOD'].upper(),
+ 'uri': wsgiref.util.request_uri(req.environ),
+ })
params = self._normalize_dict(params)
@@ -270,7 +279,7 @@ class Application(BaseApplication):
response_code = self._get_response_code(req)
return render_response(body=result, status=response_code,
- method=req_method)
+ method=req.environ['REQUEST_METHOD'])
def _get_response_code(self, req):
req_method = req.environ['REQUEST_METHOD']
@@ -284,17 +293,21 @@ class Application(BaseApplication):
return arg.replace(':', '_').replace('-', '_')
def _normalize_dict(self, d):
- return {self._normalize_arg(k): v for (k, v) in six.iteritems(d)}
+ return {self._normalize_arg(k): v for (k, v) in d.items()}
def assert_admin(self, context):
+ """Ensure the user is an admin.
+
+ :raises keystone.exception.Unauthorized: if a token could not be
+ found/authorized, a user is invalid, or a tenant is
+ invalid/not scoped.
+ :raises keystone.exception.Forbidden: if the user is not an admin and
+ does not have the admin role
+
+ """
+
if not context['is_admin']:
- try:
- user_token_ref = token_model.KeystoneToken(
- token_id=context['token_id'],
- token_data=self.token_provider_api.validate_token(
- context['token_id']))
- except exception.TokenNotFound as e:
- raise exception.Unauthorized(e)
+ user_token_ref = utils.get_token_ref(context)
validate_token_bind(context, user_token_ref)
creds = copy.deepcopy(user_token_ref.metadata)
@@ -353,16 +366,7 @@ class Application(BaseApplication):
LOG.debug(('will not lookup trust as the request auth token is '
'either absent or it is the system admin token'))
return None
-
- try:
- token_data = self.token_provider_api.validate_token(
- context['token_id'])
- except exception.TokenNotFound:
- LOG.warning(_LW('Invalid token in _get_trust_id_for_request'))
- raise exception.Unauthorized()
-
- token_ref = token_model.KeystoneToken(token_id=context['token_id'],
- token_data=token_data)
+ token_ref = utils.get_token_ref(context)
return token_ref.trust_id
@classmethod
@@ -371,8 +375,7 @@ class Application(BaseApplication):
if url:
substitutions = dict(
- itertools.chain(six.iteritems(CONF),
- six.iteritems(CONF.eventlet_server)))
+ itertools.chain(CONF.items(), CONF.eventlet_server.items()))
url = url % substitutions
else:
@@ -491,7 +494,7 @@ class Debug(Middleware):
resp = req.get_response(self.application)
if not hasattr(LOG, 'isEnabledFor') or LOG.isEnabledFor(LOG.debug):
LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE HEADERS', ('*' * 20))
- for (key, value) in six.iteritems(resp.headers):
+ for (key, value) in resp.headers.items():
LOG.debug('%s = %s', key, value)
LOG.debug('')
@@ -603,7 +606,7 @@ class ExtensionRouter(Router):
mapper = routes.Mapper()
self.application = application
self.add_routes(mapper)
- mapper.connect('{path_info:.*}', controller=self.application)
+ mapper.connect('/{path_info:.*}', controller=self.application)
super(ExtensionRouter, self).__init__(mapper)
def add_routes(self, mapper):
@@ -657,7 +660,7 @@ class RoutersBase(object):
get_action=None, head_action=None, get_head_action=None,
put_action=None, post_action=None, patch_action=None,
delete_action=None, get_post_action=None,
- path_vars=None, status=None):
+ path_vars=None, status=json_home.Status.STABLE):
if get_head_action:
getattr(controller, get_head_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=get_head_action,
@@ -699,13 +702,7 @@ class RoutersBase(object):
else:
resource_data['href'] = path
- if status:
- if not json_home.Status.is_supported(status):
- raise exception.Error(message=_(
- 'Unexpected status requested for JSON Home response, %s') %
- status)
- resource_data.setdefault('hints', {})
- resource_data['hints']['status'] = status
+ json_home.Status.update_resource_data(resource_data, status)
self.v3_resources.append((rel, resource_data))
@@ -762,8 +759,6 @@ def render_response(body=None, status=None, headers=None, method=None):
else:
content_type = None
- JSON_ENCODE_CONTENT_TYPES = ('application/json',
- 'application/json-home',)
if content_type is None or content_type in JSON_ENCODE_CONTENT_TYPES:
body = jsonutils.dumps(body, cls=utils.SmarterEncoder)
if content_type is None:
@@ -774,7 +769,7 @@ def render_response(body=None, status=None, headers=None, method=None):
status='%s %s' % status,
headerlist=headers)
- if method == 'HEAD':
+ if method and method.upper() == 'HEAD':
# NOTE(morganfainberg): HEAD requests should return the same status
# as a GET request and same headers (including content-type and
# content-length). The webob.Response object automatically changes
@@ -785,7 +780,7 @@ def render_response(body=None, status=None, headers=None, method=None):
# both py2x and py3x.
stored_headers = resp.headers.copy()
resp.body = b''
- for header, value in six.iteritems(stored_headers):
+ for header, value in stored_headers.items():
resp.headers[header] = value
return resp
@@ -820,8 +815,7 @@ def render_exception(error, context=None, request=None, user_locale=None):
url = 'http://localhost:%d' % CONF.eventlet_server.public_port
else:
substitutions = dict(
- itertools.chain(six.iteritems(CONF),
- six.iteritems(CONF.eventlet_server)))
+ itertools.chain(CONF.items(), CONF.eventlet_server.items()))
url = url % substitutions
headers.append(('WWW-Authenticate', 'Keystone uri="%s"' % url))
diff --git a/keystone-moon/keystone/config.py b/keystone-moon/keystone/config.py
index 3d9a29fd..3967cee0 100644
--- a/keystone-moon/keystone/config.py
+++ b/keystone-moon/keystone/config.py
@@ -47,7 +47,8 @@ def set_default_for_default_log_levels():
]
log.register_options(CONF)
- CONF.default_log_levels.extend(extra_log_level_defaults)
+ CONF.set_default('default_log_levels',
+ CONF.default_log_levels + extra_log_level_defaults)
def setup_logging():
diff --git a/keystone-moon/keystone/contrib/ec2/controllers.py b/keystone-moon/keystone/contrib/ec2/controllers.py
index 6e6d3268..78172ec9 100644
--- a/keystone-moon/keystone/contrib/ec2/controllers.py
+++ b/keystone-moon/keystone/contrib/ec2/controllers.py
@@ -46,7 +46,6 @@ from keystone.common import utils
from keystone.common import wsgi
from keystone import exception
from keystone.i18n import _
-from keystone.models import token_model
@dependency.requires('assignment_api', 'catalog_api', 'credential_api',
@@ -57,16 +56,30 @@ class Ec2ControllerCommon(object):
def check_signature(self, creds_ref, credentials):
signer = ec2_utils.Ec2Signer(creds_ref['secret'])
signature = signer.generate(credentials)
- if utils.auth_str_equal(credentials['signature'], signature):
- return
- # NOTE(vish): Some libraries don't use the port when signing
- # requests, so try again without port.
- elif ':' in credentials['signature']:
- hostname, _port = credentials['host'].split(':')
- credentials['host'] = hostname
- signature = signer.generate(credentials)
- if not utils.auth_str_equal(credentials.signature, signature):
- raise exception.Unauthorized(message='Invalid EC2 signature.')
+ # NOTE(davechen): credentials.get('signature') is not guaranteed to
+ # exist, so we need to check it explicitly.
+ if credentials.get('signature'):
+ if utils.auth_str_equal(credentials['signature'], signature):
+ return True
+ # NOTE(vish): Some client libraries don't use the port when signing
+ # requests, so try again without port.
+ elif ':' in credentials['host']:
+ hostname, _port = credentials['host'].split(':')
+ credentials['host'] = hostname
+ # NOTE(davechen): we need to reinitialize 'signer' to avoid a
+ # contaminated signature state; this is similar to what
+ # libraries in other languages do, Java for example.
+ signer = ec2_utils.Ec2Signer(creds_ref['secret'])
+ signature = signer.generate(credentials)
+ if utils.auth_str_equal(credentials['signature'],
+ signature):
+ return True
+ raise exception.Unauthorized(
+ message='Invalid EC2 signature.')
+ else:
+ raise exception.Unauthorized(
+ message='EC2 signature not supplied.')
+ # Raise the exception when credentials.get('signature') is None
else:
raise exception.Unauthorized(message='EC2 signature not supplied.')
@@ -305,14 +318,7 @@ class Ec2Controller(Ec2ControllerCommon, controller.V2Controller):
:raises exception.Forbidden: when token is invalid
"""
- try:
- token_data = self.token_provider_api.validate_token(
- context['token_id'])
- except exception.TokenNotFound as e:
- raise exception.Unauthorized(e)
-
- token_ref = token_model.KeystoneToken(token_id=context['token_id'],
- token_data=token_data)
+ token_ref = utils.get_token_ref(context)
if token_ref.user_id != user_id:
raise exception.Forbidden(_('Token belongs to another user'))
@@ -329,7 +335,7 @@ class Ec2Controller(Ec2ControllerCommon, controller.V2Controller):
# to properly perform policy enforcement.
self.assert_admin(context)
return True
- except exception.Forbidden:
+ except (exception.Forbidden, exception.Unauthorized):
return False
def _assert_owner(self, user_id, credential_id):
@@ -349,11 +355,11 @@ class Ec2Controller(Ec2ControllerCommon, controller.V2Controller):
@dependency.requires('policy_api', 'token_provider_api')
class Ec2ControllerV3(Ec2ControllerCommon, controller.V3Controller):
- member_name = 'project'
+ collection_name = 'credentials'
+ member_name = 'credential'
def __init__(self):
super(Ec2ControllerV3, self).__init__()
- self.get_member_from_driver = self.credential_api.get_credential
def _check_credential_owner_and_user_id_match(self, context, prep_info,
user_id, credential_id):
@@ -385,23 +391,35 @@ class Ec2ControllerV3(Ec2ControllerCommon, controller.V3Controller):
@controller.protected(callback=_check_credential_owner_and_user_id_match)
def ec2_get_credential(self, context, user_id, credential_id):
- return super(Ec2ControllerV3, self).get_credential(user_id,
- credential_id)
+ ref = super(Ec2ControllerV3, self).get_credential(user_id,
+ credential_id)
+ return Ec2ControllerV3.wrap_member(context, ref['credential'])
@controller.protected()
def ec2_list_credentials(self, context, user_id):
- return super(Ec2ControllerV3, self).get_credentials(user_id)
+ refs = super(Ec2ControllerV3, self).get_credentials(user_id)
+ return Ec2ControllerV3.wrap_collection(context, refs['credentials'])
@controller.protected()
def ec2_create_credential(self, context, user_id, tenant_id):
- return super(Ec2ControllerV3, self).create_credential(context, user_id,
- tenant_id)
+ ref = super(Ec2ControllerV3, self).create_credential(context, user_id,
+ tenant_id)
+ return Ec2ControllerV3.wrap_member(context, ref['credential'])
@controller.protected(callback=_check_credential_owner_and_user_id_match)
def ec2_delete_credential(self, context, user_id, credential_id):
return super(Ec2ControllerV3, self).delete_credential(user_id,
credential_id)
+ @classmethod
+ def _add_self_referential_link(cls, context, ref):
+ path = '/users/%(user_id)s/credentials/OS-EC2/%(credential_id)s'
+ url = cls.base_url(context, path) % {
+ 'user_id': ref['user_id'],
+ 'credential_id': ref['access']}
+ ref.setdefault('links', {})
+ ref['links']['self'] = url
+
def render_token_data_response(token_id, token_data):
"""Render token data HTTP response.
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/backends/catalog_sql.py b/keystone-moon/keystone/contrib/endpoint_filter/backends/catalog_sql.py
index 6ac3c1ca..22d5796a 100644
--- a/keystone-moon/keystone/contrib/endpoint_filter/backends/catalog_sql.py
+++ b/keystone-moon/keystone/contrib/endpoint_filter/backends/catalog_sql.py
@@ -13,20 +13,20 @@
# under the License.
from oslo_config import cfg
-import six
from keystone.catalog.backends import sql
from keystone.catalog import core as catalog_core
from keystone.common import dependency
from keystone import exception
+
CONF = cfg.CONF
@dependency.requires('endpoint_filter_api')
class EndpointFilterCatalog(sql.Catalog):
def get_v3_catalog(self, user_id, project_id):
- substitutions = dict(six.iteritems(CONF))
+ substitutions = dict(CONF.items())
substitutions.update({'tenant_id': project_id, 'user_id': user_id})
services = {}
@@ -66,7 +66,7 @@ class EndpointFilterCatalog(sql.Catalog):
# format catalog
catalog = []
- for service_id, service in six.iteritems(services):
+ for service_id, service in services.items():
formatted_service = {}
formatted_service['id'] = service['id']
formatted_service['type'] = service['type']
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/backends/sql.py b/keystone-moon/keystone/contrib/endpoint_filter/backends/sql.py
index a998423f..53d511e5 100644
--- a/keystone-moon/keystone/contrib/endpoint_filter/backends/sql.py
+++ b/keystone-moon/keystone/contrib/endpoint_filter/backends/sql.py
@@ -13,6 +13,7 @@
# under the License.
from keystone.common import sql
+from keystone.contrib import endpoint_filter
from keystone import exception
from keystone.i18n import _
@@ -52,7 +53,7 @@ class ProjectEndpointGroupMembership(sql.ModelBase, sql.ModelDictMixin):
'project_id'), {})
-class EndpointFilter(object):
+class EndpointFilter(endpoint_filter.Driver):
@sql.handle_conflicts(conflict_type='project_endpoint')
def add_endpoint_to_project(self, endpoint_id, project_id):
@@ -150,9 +151,9 @@ class EndpointFilter(object):
endpoint_group_ref = self._get_endpoint_group(session,
endpoint_group_id)
with session.begin():
- session.delete(endpoint_group_ref)
self._delete_endpoint_group_association_by_endpoint_group(
session, endpoint_group_id)
+ session.delete(endpoint_group_ref)
def get_endpoint_group_in_project(self, endpoint_group_id, project_id):
session = sql.get_session()
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/controllers.py b/keystone-moon/keystone/contrib/endpoint_filter/controllers.py
index dc4ef7a3..eb627c6b 100644
--- a/keystone-moon/keystone/contrib/endpoint_filter/controllers.py
+++ b/keystone-moon/keystone/contrib/endpoint_filter/controllers.py
@@ -49,7 +49,7 @@ class _ControllerBase(controller.V3Controller):
for endpoint in endpoints:
is_candidate = True
- for key, value in six.iteritems(filters):
+ for key, value in filters.items():
if endpoint[key] != value:
is_candidate = False
break
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/core.py b/keystone-moon/keystone/contrib/endpoint_filter/core.py
index 972b65dd..1cb35b1f 100644
--- a/keystone-moon/keystone/contrib/endpoint_filter/core.py
+++ b/keystone-moon/keystone/contrib/endpoint_filter/core.py
@@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+"""Main entry point into the Endpoint Filter service."""
+
import abc
from oslo_config import cfg
@@ -56,6 +58,8 @@ class Manager(manager.Manager):
"""
+ driver_namespace = 'keystone.endpoint_filter'
+
def __init__(self):
super(Manager, self).__init__(CONF.endpoint_filter.driver)
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py b/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py
index 090e7f47..2aa93a86 100644
--- a/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py
+++ b/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py
@@ -36,12 +36,3 @@ def upgrade(migrate_engine):
nullable=False))
endpoint_filtering_table.create(migrate_engine, checkfirst=True)
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- # Operations to reverse the above upgrade go here.
- for table_name in ['project_endpoint']:
- table = sql.Table(table_name, meta, autoload=True)
- table.drop()
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/002_add_endpoint_groups.py b/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/002_add_endpoint_groups.py
index 5f80160a..2c218b0d 100644
--- a/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/002_add_endpoint_groups.py
+++ b/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/002_add_endpoint_groups.py
@@ -39,13 +39,3 @@ def upgrade(migrate_engine):
sql.PrimaryKeyConstraint('endpoint_group_id',
'project_id'))
project_endpoint_group_table.create(migrate_engine, checkfirst=True)
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- # Operations to reverse the above upgrade go here.
- for table_name in ['project_endpoint_group',
- 'endpoint_group']:
- table = sql.Table(table_name, meta, autoload=True)
- table.drop()
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/routers.py b/keystone-moon/keystone/contrib/endpoint_filter/routers.py
index 00c8cd72..285b9df2 100644
--- a/keystone-moon/keystone/contrib/endpoint_filter/routers.py
+++ b/keystone-moon/keystone/contrib/endpoint_filter/routers.py
@@ -36,28 +36,32 @@ class EndpointFilterExtension(wsgi.V3ExtensionRouter):
The API looks like::
- PUT /OS-EP-FILTER/projects/$project_id/endpoints/$endpoint_id
- GET /OS-EP-FILTER/projects/$project_id/endpoints/$endpoint_id
- HEAD /OS-EP-FILTER/projects/$project_id/endpoints/$endpoint_id
- DELETE /OS-EP-FILTER/projects/$project_id/endpoints/$endpoint_id
- GET /OS-EP-FILTER/endpoints/$endpoint_id/projects
- GET /OS-EP-FILTER/projects/$project_id/endpoints
+ PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+ GET /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+ HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+ DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+ GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
+ GET /OS-EP-FILTER/projects/{project_id}/endpoints
+ GET /OS-EP-FILTER/projects/{project_id}/endpoint_groups
GET /OS-EP-FILTER/endpoint_groups
POST /OS-EP-FILTER/endpoint_groups
- GET /OS-EP-FILTER/endpoint_groups/$endpoint_group_id
- HEAD /OS-EP-FILTER/endpoint_groups/$endpoint_group_id
- PATCH /OS-EP-FILTER/endpoint_groups/$endpoint_group_id
- DELETE /OS-EP-FILTER/endpoint_groups/$endpoint_group_id
-
- GET /OS-EP-FILTER/endpoint_groups/$endpoint_group_id/projects
- GET /OS-EP-FILTER/endpoint_groups/$endpoint_group_id/endpoints
-
- PUT /OS-EP-FILTER/endpoint_groups/$endpoint_group/projects/$project_id
- GET /OS-EP-FILTER/endpoint_groups/$endpoint_group/projects/$project_id
- HEAD /OS-EP-FILTER/endpoint_groups/$endpoint_group/projects/$project_id
- DELETE /OS-EP-FILTER/endpoint_groups/$endpoint_group/projects/
- $project_id
+ GET /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
+ HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
+ PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
+ DELETE /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
+
+ GET /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects
+ GET /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/endpoints
+
+ PUT /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects/
+ {project_id}
+ GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects/
+ {project_id}
+ HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects/
+ {project_id}
+ DELETE /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects/
+ {project_id}
"""
PATH_PREFIX = '/OS-EP-FILTER'
@@ -101,6 +105,15 @@ class EndpointFilterExtension(wsgi.V3ExtensionRouter):
})
self._add_resource(
mapper, endpoint_group_controller,
+ path=self.PATH_PREFIX + '/projects/{project_id}/endpoint_groups',
+ get_action='list_endpoint_groups_for_project',
+ rel=build_resource_relation(
+ resource_name='project_endpoint_groups'),
+ path_vars={
+ 'project_id': json_home.Parameters.PROJECT_ID,
+ })
+ self._add_resource(
+ mapper, endpoint_group_controller,
path=self.PATH_PREFIX + '/endpoint_groups',
get_action='list_endpoint_groups',
post_action='create_endpoint_group',
diff --git a/keystone-moon/keystone/contrib/endpoint_policy/__init__.py b/keystone-moon/keystone/contrib/endpoint_policy/__init__.py
index 12722dc5..e69de29b 100644
--- a/keystone-moon/keystone/contrib/endpoint_policy/__init__.py
+++ b/keystone-moon/keystone/contrib/endpoint_policy/__init__.py
@@ -1,15 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from keystone.contrib.endpoint_policy.core import * # noqa
diff --git a/keystone-moon/keystone/contrib/endpoint_policy/backends/sql.py b/keystone-moon/keystone/contrib/endpoint_policy/backends/sql.py
index 484444f1..54792f30 100644
--- a/keystone-moon/keystone/contrib/endpoint_policy/backends/sql.py
+++ b/keystone-moon/keystone/contrib/endpoint_policy/backends/sql.py
@@ -1,5 +1,3 @@
-# Copyright 2014 IBM Corp.
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -12,129 +10,23 @@
# License for the specific language governing permissions and limitations
# under the License.
-import uuid
-
-import sqlalchemy
-
-from keystone.common import sql
-from keystone import exception
-
-
-class PolicyAssociation(sql.ModelBase, sql.ModelDictMixin):
- __tablename__ = 'policy_association'
- attributes = ['policy_id', 'endpoint_id', 'region_id', 'service_id']
- # The id column is never exposed outside this module. It only exists to
- # provide a primary key, given that the real columns we would like to use
- # (endpoint_id, service_id, region_id) can be null
- id = sql.Column(sql.String(64), primary_key=True)
- policy_id = sql.Column(sql.String(64), nullable=False)
- endpoint_id = sql.Column(sql.String(64), nullable=True)
- service_id = sql.Column(sql.String(64), nullable=True)
- region_id = sql.Column(sql.String(64), nullable=True)
- __table_args__ = (sql.UniqueConstraint('endpoint_id', 'service_id',
- 'region_id'), {})
-
- def to_dict(self):
- """Returns the model's attributes as a dictionary.
-
- We override the standard method in order to hide the id column,
- since this only exists to provide the table with a primary key.
-
- """
- d = {}
- for attr in self.__class__.attributes:
- d[attr] = getattr(self, attr)
- return d
-
-
-class EndpointPolicy(object):
-
- def create_policy_association(self, policy_id, endpoint_id=None,
- service_id=None, region_id=None):
- with sql.transaction() as session:
- try:
- # See if there is already a row for this association, and if
- # so, update it with the new policy_id
- query = session.query(PolicyAssociation)
- query = query.filter_by(endpoint_id=endpoint_id)
- query = query.filter_by(service_id=service_id)
- query = query.filter_by(region_id=region_id)
- association = query.one()
- association.policy_id = policy_id
- except sql.NotFound:
- association = PolicyAssociation(id=uuid.uuid4().hex,
- policy_id=policy_id,
- endpoint_id=endpoint_id,
- service_id=service_id,
- region_id=region_id)
- session.add(association)
-
- def check_policy_association(self, policy_id, endpoint_id=None,
- service_id=None, region_id=None):
- sql_constraints = sqlalchemy.and_(
- PolicyAssociation.policy_id == policy_id,
- PolicyAssociation.endpoint_id == endpoint_id,
- PolicyAssociation.service_id == service_id,
- PolicyAssociation.region_id == region_id)
-
- # NOTE(henry-nash): Getting a single value to save object
- # management overhead.
- with sql.transaction() as session:
- if session.query(PolicyAssociation.id).filter(
- sql_constraints).distinct().count() == 0:
- raise exception.PolicyAssociationNotFound()
-
- def delete_policy_association(self, policy_id, endpoint_id=None,
- service_id=None, region_id=None):
- with sql.transaction() as session:
- query = session.query(PolicyAssociation)
- query = query.filter_by(policy_id=policy_id)
- query = query.filter_by(endpoint_id=endpoint_id)
- query = query.filter_by(service_id=service_id)
- query = query.filter_by(region_id=region_id)
- query.delete()
+import logging
- def get_policy_association(self, endpoint_id=None,
- service_id=None, region_id=None):
- sql_constraints = sqlalchemy.and_(
- PolicyAssociation.endpoint_id == endpoint_id,
- PolicyAssociation.service_id == service_id,
- PolicyAssociation.region_id == region_id)
+from oslo_log import versionutils
- try:
- with sql.transaction() as session:
- policy_id = session.query(PolicyAssociation.policy_id).filter(
- sql_constraints).distinct().one()
- return {'policy_id': policy_id}
- except sql.NotFound:
- raise exception.PolicyAssociationNotFound()
+from keystone.endpoint_policy.backends import sql
- def list_associations_for_policy(self, policy_id):
- with sql.transaction() as session:
- query = session.query(PolicyAssociation)
- query = query.filter_by(policy_id=policy_id)
- return [ref.to_dict() for ref in query.all()]
+LOG = logging.getLogger(__name__)
- def delete_association_by_endpoint(self, endpoint_id):
- with sql.transaction() as session:
- query = session.query(PolicyAssociation)
- query = query.filter_by(endpoint_id=endpoint_id)
- query.delete()
+_OLD = 'keystone.contrib.endpoint_policy.backends.sql.EndpointPolicy'
+_NEW = 'keystone.endpoint_policy.backends.sql.EndpointPolicy'
- def delete_association_by_service(self, service_id):
- with sql.transaction() as session:
- query = session.query(PolicyAssociation)
- query = query.filter_by(service_id=service_id)
- query.delete()
- def delete_association_by_region(self, region_id):
- with sql.transaction() as session:
- query = session.query(PolicyAssociation)
- query = query.filter_by(region_id=region_id)
- query.delete()
+class EndpointPolicy(sql.EndpointPolicy):
- def delete_association_by_policy(self, policy_id):
- with sql.transaction() as session:
- query = session.query(PolicyAssociation)
- query = query.filter_by(policy_id=policy_id)
- query.delete()
+ @versionutils.deprecated(versionutils.deprecated.LIBERTY,
+ in_favor_of=_NEW,
+ remove_in=1,
+ what=_OLD)
+ def __init__(self, *args, **kwargs):
+ super(EndpointPolicy, self).__init__(*args, **kwargs)
diff --git a/keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/versions/001_add_endpoint_policy_table.py b/keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/versions/001_add_endpoint_policy_table.py
index c77e4380..5c22f169 100644
--- a/keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/versions/001_add_endpoint_policy_table.py
+++ b/keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/versions/001_add_endpoint_policy_table.py
@@ -38,11 +38,3 @@ def upgrade(migrate_engine):
mysql_charset='utf8')
endpoint_policy_table.create(migrate_engine, checkfirst=True)
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- # Operations to reverse the above upgrade go here.
- table = sql.Table('policy_association', meta, autoload=True)
- table.drop()
diff --git a/keystone-moon/keystone/contrib/endpoint_policy/routers.py b/keystone-moon/keystone/contrib/endpoint_policy/routers.py
index 999d1eed..714d1663 100644
--- a/keystone-moon/keystone/contrib/endpoint_policy/routers.py
+++ b/keystone-moon/keystone/contrib/endpoint_policy/routers.py
@@ -1,5 +1,3 @@
-# Copyright 2014 IBM Corp.
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -12,74 +10,23 @@
# License for the specific language governing permissions and limitations
# under the License.
-import functools
-
-from keystone.common import json_home
-from keystone.common import wsgi
-from keystone.contrib.endpoint_policy import controllers
+import logging
+from oslo_log import versionutils
-build_resource_relation = functools.partial(
- json_home.build_v3_extension_resource_relation,
- extension_name='OS-ENDPOINT-POLICY', extension_version='1.0')
+from keystone.common import wsgi
+LOG = logging.getLogger(__name__)
-class EndpointPolicyExtension(wsgi.V3ExtensionRouter):
+_OLD = 'keystone.contrib.endpoint_policy.routers.EndpointPolicyExtension'
+_NEW = 'keystone.endpoint_policy.routers.Routers'
- PATH_PREFIX = '/OS-ENDPOINT-POLICY'
- def add_routes(self, mapper):
- endpoint_policy_controller = controllers.EndpointPolicyV3Controller()
+class EndpointPolicyExtension(wsgi.Middleware):
- self._add_resource(
- mapper, endpoint_policy_controller,
- path='/endpoints/{endpoint_id}' + self.PATH_PREFIX + '/policy',
- get_head_action='get_policy_for_endpoint',
- rel=build_resource_relation(resource_name='endpoint_policy'),
- path_vars={'endpoint_id': json_home.Parameters.ENDPOINT_ID})
- self._add_resource(
- mapper, endpoint_policy_controller,
- path='/policies/{policy_id}' + self.PATH_PREFIX + '/endpoints',
- get_action='list_endpoints_for_policy',
- rel=build_resource_relation(resource_name='policy_endpoints'),
- path_vars={'policy_id': json_home.Parameters.POLICY_ID})
- self._add_resource(
- mapper, endpoint_policy_controller,
- path=('/policies/{policy_id}' + self.PATH_PREFIX +
- '/endpoints/{endpoint_id}'),
- get_head_action='check_policy_association_for_endpoint',
- put_action='create_policy_association_for_endpoint',
- delete_action='delete_policy_association_for_endpoint',
- rel=build_resource_relation(
- resource_name='endpoint_policy_association'),
- path_vars={
- 'policy_id': json_home.Parameters.POLICY_ID,
- 'endpoint_id': json_home.Parameters.ENDPOINT_ID,
- })
- self._add_resource(
- mapper, endpoint_policy_controller,
- path=('/policies/{policy_id}' + self.PATH_PREFIX +
- '/services/{service_id}'),
- get_head_action='check_policy_association_for_service',
- put_action='create_policy_association_for_service',
- delete_action='delete_policy_association_for_service',
- rel=build_resource_relation(
- resource_name='service_policy_association'),
- path_vars={
- 'policy_id': json_home.Parameters.POLICY_ID,
- 'service_id': json_home.Parameters.SERVICE_ID,
- })
- self._add_resource(
- mapper, endpoint_policy_controller,
- path=('/policies/{policy_id}' + self.PATH_PREFIX +
- '/services/{service_id}/regions/{region_id}'),
- get_head_action='check_policy_association_for_region_and_service',
- put_action='create_policy_association_for_region_and_service',
- delete_action='delete_policy_association_for_region_and_service',
- rel=build_resource_relation(
- resource_name='region_and_service_policy_association'),
- path_vars={
- 'policy_id': json_home.Parameters.POLICY_ID,
- 'service_id': json_home.Parameters.SERVICE_ID,
- 'region_id': json_home.Parameters.REGION_ID,
- })
+ @versionutils.deprecated(versionutils.deprecated.LIBERTY,
+ in_favor_of=_NEW,
+ remove_in=1,
+ what=_OLD)
+ def __init__(self, *args, **kwargs):
+ super(EndpointPolicyExtension, self).__init__(*args, **kwargs)
diff --git a/keystone-moon/keystone/contrib/example/core.py b/keystone-moon/keystone/contrib/example/core.py
index 6e85c7f7..e369dc4d 100644
--- a/keystone-moon/keystone/contrib/example/core.py
+++ b/keystone-moon/keystone/contrib/example/core.py
@@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+"""Main entry point into this Example service."""
+
from oslo_log import log
from keystone.common import dependency
@@ -24,15 +26,18 @@ from keystone import notifications
LOG = log.getLogger(__name__)
+@notifications.listener # NOTE(dstanek): only needed if using event_callbacks
@dependency.provider('example_api')
class ExampleManager(manager.Manager):
- """Example Manager.
+ """Default pivot point for this Example backend.
See :mod:`keystone.common.manager.Manager` for more details on
how this dynamically calls the backend.
"""
+ driver_namespace = 'keystone.example'
+
def __init__(self):
# The following is an example of event callbacks. In this setup,
# ExampleManager's data model is depended on project's data model.
@@ -45,8 +50,8 @@ class ExampleManager(manager.Manager):
# project_created_callback will be invoked whenever a new project is
# created.
- # This information is used when the @dependency.provider decorator acts
- # on the class.
+ # This information is used when the @notifications.listener decorator
+ # acts on the class.
self.event_callbacks = {
notifications.ACTIONS.deleted: {
'project': [self.project_deleted_callback],
diff --git a/keystone-moon/keystone/contrib/example/migrate_repo/versions/001_example_table.py b/keystone-moon/keystone/contrib/example/migrate_repo/versions/001_example_table.py
index 10b7ccc7..35061780 100644
--- a/keystone-moon/keystone/contrib/example/migrate_repo/versions/001_example_table.py
+++ b/keystone-moon/keystone/contrib/example/migrate_repo/versions/001_example_table.py
@@ -30,14 +30,3 @@ def upgrade(migrate_engine):
sql.Column('type', sql.String(255)),
sql.Column('extra', sql.Text()))
service_table.create(migrate_engine, checkfirst=True)
-
-
-def downgrade(migrate_engine):
- # Operations to reverse the above upgrade go here.
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- tables = ['example']
- for t in tables:
- table = sql.Table(t, meta, autoload=True)
- table.drop(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/contrib/federation/backends/sql.py b/keystone-moon/keystone/contrib/federation/backends/sql.py
index f2c124d0..ed07c08f 100644
--- a/keystone-moon/keystone/contrib/federation/backends/sql.py
+++ b/keystone-moon/keystone/contrib/federation/backends/sql.py
@@ -17,6 +17,7 @@ from oslo_serialization import jsonutils
from keystone.common import sql
from keystone.contrib.federation import core
from keystone import exception
+from sqlalchemy import orm
class FederationProtocolModel(sql.ModelBase, sql.DictBase):
@@ -44,13 +45,53 @@ class FederationProtocolModel(sql.ModelBase, sql.DictBase):
class IdentityProviderModel(sql.ModelBase, sql.DictBase):
__tablename__ = 'identity_provider'
- attributes = ['id', 'remote_id', 'enabled', 'description']
- mutable_attributes = frozenset(['description', 'enabled', 'remote_id'])
+ attributes = ['id', 'enabled', 'description', 'remote_ids']
+ mutable_attributes = frozenset(['description', 'enabled', 'remote_ids'])
id = sql.Column(sql.String(64), primary_key=True)
- remote_id = sql.Column(sql.String(256), nullable=True)
enabled = sql.Column(sql.Boolean, nullable=False)
description = sql.Column(sql.Text(), nullable=True)
+ remote_ids = orm.relationship('IdPRemoteIdsModel',
+ order_by='IdPRemoteIdsModel.remote_id',
+ cascade='all, delete-orphan')
+
+ @classmethod
+ def from_dict(cls, dictionary):
+ new_dictionary = dictionary.copy()
+ remote_ids_list = new_dictionary.pop('remote_ids', None)
+ if not remote_ids_list:
+ remote_ids_list = []
+ identity_provider = cls(**new_dictionary)
+ remote_ids = []
+ # NOTE(fmarco76): the remote_ids_list contains only remote ids
+ # associated with the IdP because of the "relationship" established in
+ # sqlalchemy and corresponding to the FK in the idp_remote_ids table
+ for remote in remote_ids_list:
+ remote_ids.append(IdPRemoteIdsModel(remote_id=remote))
+ identity_provider.remote_ids = remote_ids
+ return identity_provider
+
+ def to_dict(self):
+ """Return a dictionary with model's attributes."""
+ d = dict()
+ for attr in self.__class__.attributes:
+ d[attr] = getattr(self, attr)
+ d['remote_ids'] = []
+ for remote in self.remote_ids:
+ d['remote_ids'].append(remote.remote_id)
+ return d
+
+
+class IdPRemoteIdsModel(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'idp_remote_ids'
+ attributes = ['idp_id', 'remote_id']
+ mutable_attributes = frozenset(['idp_id', 'remote_id'])
+
+ idp_id = sql.Column(sql.String(64),
+ sql.ForeignKey('identity_provider.id',
+ ondelete='CASCADE'))
+ remote_id = sql.Column(sql.String(255),
+ primary_key=True)
@classmethod
def from_dict(cls, dictionary):
@@ -75,6 +116,7 @@ class MappingModel(sql.ModelBase, sql.DictBase):
@classmethod
def from_dict(cls, dictionary):
new_dictionary = dictionary.copy()
+ new_dictionary['rules'] = jsonutils.dumps(new_dictionary['rules'])
return cls(**new_dictionary)
def to_dict(self):
@@ -82,20 +124,23 @@ class MappingModel(sql.ModelBase, sql.DictBase):
d = dict()
for attr in self.__class__.attributes:
d[attr] = getattr(self, attr)
+ d['rules'] = jsonutils.loads(d['rules'])
return d
class ServiceProviderModel(sql.ModelBase, sql.DictBase):
__tablename__ = 'service_provider'
- attributes = ['auth_url', 'id', 'enabled', 'description', 'sp_url']
+ attributes = ['auth_url', 'id', 'enabled', 'description',
+ 'relay_state_prefix', 'sp_url']
mutable_attributes = frozenset(['auth_url', 'description', 'enabled',
- 'sp_url'])
+ 'relay_state_prefix', 'sp_url'])
id = sql.Column(sql.String(64), primary_key=True)
enabled = sql.Column(sql.Boolean, nullable=False)
description = sql.Column(sql.Text(), nullable=True)
auth_url = sql.Column(sql.String(256), nullable=False)
sp_url = sql.Column(sql.String(256), nullable=False)
+ relay_state_prefix = sql.Column(sql.String(256), nullable=False)
@classmethod
def from_dict(cls, dictionary):
@@ -123,6 +168,7 @@ class Federation(core.Driver):
def delete_idp(self, idp_id):
with sql.transaction() as session:
+ self._delete_assigned_protocols(session, idp_id)
idp_ref = self._get_idp(session, idp_id)
session.delete(idp_ref)
@@ -133,7 +179,7 @@ class Federation(core.Driver):
return idp_ref
def _get_idp_from_remote_id(self, session, remote_id):
- q = session.query(IdentityProviderModel)
+ q = session.query(IdPRemoteIdsModel)
q = q.filter_by(remote_id=remote_id)
try:
return q.one()
@@ -153,8 +199,8 @@ class Federation(core.Driver):
def get_idp_from_remote_id(self, remote_id):
with sql.transaction() as session:
- idp_ref = self._get_idp_from_remote_id(session, remote_id)
- return idp_ref.to_dict()
+ ref = self._get_idp_from_remote_id(session, remote_id)
+ return ref.to_dict()
def update_idp(self, idp_id, idp):
with sql.transaction() as session:
@@ -214,6 +260,11 @@ class Federation(core.Driver):
key_ref = self._get_protocol(session, idp_id, protocol_id)
session.delete(key_ref)
+ def _delete_assigned_protocols(self, session, idp_id):
+ query = session.query(FederationProtocolModel)
+ query = query.filter_by(idp_id=idp_id)
+ query.delete()
+
# Mapping CRUD
def _get_mapping(self, session, mapping_id):
mapping_ref = session.query(MappingModel).get(mapping_id)
@@ -225,7 +276,7 @@ class Federation(core.Driver):
def create_mapping(self, mapping_id, mapping):
ref = {}
ref['id'] = mapping_id
- ref['rules'] = jsonutils.dumps(mapping.get('rules'))
+ ref['rules'] = mapping.get('rules')
with sql.transaction() as session:
mapping_ref = MappingModel.from_dict(ref)
session.add(mapping_ref)
@@ -250,7 +301,7 @@ class Federation(core.Driver):
def update_mapping(self, mapping_id, mapping):
ref = {}
ref['id'] = mapping_id
- ref['rules'] = jsonutils.dumps(mapping.get('rules'))
+ ref['rules'] = mapping.get('rules')
with sql.transaction() as session:
mapping_ref = self._get_mapping(session, mapping_id)
old_mapping = mapping_ref.to_dict()
diff --git a/keystone-moon/keystone/contrib/federation/constants.py b/keystone-moon/keystone/contrib/federation/constants.py
new file mode 100644
index 00000000..afb38494
--- /dev/null
+++ b/keystone-moon/keystone/contrib/federation/constants.py
@@ -0,0 +1,15 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+FEDERATION = 'OS-FEDERATION'
+IDENTITY_PROVIDER = 'OS-FEDERATION:identity_provider'
+PROTOCOL = 'OS-FEDERATION:protocol'
diff --git a/keystone-moon/keystone/contrib/federation/controllers.py b/keystone-moon/keystone/contrib/federation/controllers.py
index 6066a33f..912d45d5 100644
--- a/keystone-moon/keystone/contrib/federation/controllers.py
+++ b/keystone-moon/keystone/contrib/federation/controllers.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Extensions supporting Federation."""
+"""Workflow logic for the Federation service."""
import string
@@ -55,9 +55,9 @@ class IdentityProvider(_ControllerBase):
collection_name = 'identity_providers'
member_name = 'identity_provider'
- _mutable_parameters = frozenset(['description', 'enabled', 'remote_id'])
+ _mutable_parameters = frozenset(['description', 'enabled', 'remote_ids'])
_public_parameters = frozenset(['id', 'enabled', 'description',
- 'remote_id', 'links'
+ 'remote_ids', 'links'
])
@classmethod
@@ -247,6 +247,36 @@ class MappingController(_ControllerBase):
@dependency.requires('federation_api')
class Auth(auth_controllers.Auth):
+ def _get_sso_origin_host(self, context):
+ """Validate and return originating dashboard URL.
+
+ Make sure the parameter is specified in the request's URL as well its
+ value belongs to a list of trusted dashboards.
+
+ :param context: request's context
+ :raises: exception.ValidationError: ``origin`` query parameter was not
+ specified. The URL is deemed invalid.
+ :raises: exception.Unauthorized: URL specified in origin query
+ parameter does not exist in list of websso trusted dashboards.
+ :returns: URL with the originating dashboard
+
+ """
+ if 'origin' in context['query_string']:
+ origin = context['query_string'].get('origin')
+ host = urllib.parse.unquote_plus(origin)
+ else:
+ msg = _('Request must have an origin query parameter')
+ LOG.error(msg)
+ raise exception.ValidationError(msg)
+
+ if host not in CONF.federation.trusted_dashboard:
+ msg = _('%(host)s is not a trusted dashboard host')
+ msg = msg % {'host': host}
+ LOG.error(msg)
+ raise exception.Unauthorized(msg)
+
+ return host
+
def federated_authentication(self, context, identity_provider, protocol):
"""Authenticate from dedicated url endpoint.
@@ -268,33 +298,23 @@ class Auth(auth_controllers.Auth):
def federated_sso_auth(self, context, protocol_id):
try:
- remote_id_name = CONF.federation.remote_id_attribute
+ remote_id_name = utils.get_remote_id_parameter(protocol_id)
remote_id = context['environment'][remote_id_name]
except KeyError:
msg = _('Missing entity ID from environment')
LOG.error(msg)
raise exception.Unauthorized(msg)
- if 'origin' in context['query_string']:
- origin = context['query_string'].get('origin')
- host = urllib.parse.unquote_plus(origin)
- else:
- msg = _('Request must have an origin query parameter')
- LOG.error(msg)
- raise exception.ValidationError(msg)
+ host = self._get_sso_origin_host(context)
- if host in CONF.federation.trusted_dashboard:
- ref = self.federation_api.get_idp_from_remote_id(remote_id)
- identity_provider = ref['id']
- res = self.federated_authentication(context, identity_provider,
- protocol_id)
- token_id = res.headers['X-Subject-Token']
- return self.render_html_response(host, token_id)
- else:
- msg = _('%(host)s is not a trusted dashboard host')
- msg = msg % {'host': host}
- LOG.error(msg)
- raise exception.Unauthorized(msg)
+ ref = self.federation_api.get_idp_from_remote_id(remote_id)
+ # NOTE(stevemar): the returned object is a simple dict that
+ # contains the idp_id and remote_id.
+ identity_provider = ref['idp_id']
+ res = self.federated_authentication(context, identity_provider,
+ protocol_id)
+ token_id = res.headers['X-Subject-Token']
+ return self.render_html_response(host, token_id)
def render_html_response(self, host, token_id):
"""Forms an HTML Form from a template with autosubmit."""
@@ -309,45 +329,77 @@ class Auth(auth_controllers.Auth):
return webob.Response(body=body, status='200',
headerlist=headers)
- @validation.validated(schema.saml_create, 'auth')
- def create_saml_assertion(self, context, auth):
- """Exchange a scoped token for a SAML assertion.
-
- :param auth: Dictionary that contains a token and service provider id
- :returns: SAML Assertion based on properties from the token
- """
-
+ def _create_base_saml_assertion(self, context, auth):
issuer = CONF.saml.idp_entity_id
sp_id = auth['scope']['service_provider']['id']
service_provider = self.federation_api.get_sp(sp_id)
utils.assert_enabled_service_provider_object(service_provider)
-
sp_url = service_provider.get('sp_url')
- auth_url = service_provider.get('auth_url')
token_id = auth['identity']['token']['id']
token_data = self.token_provider_api.validate_token(token_id)
token_ref = token_model.KeystoneToken(token_id, token_data)
- subject = token_ref.user_name
- roles = token_ref.role_names
if not token_ref.project_scoped:
action = _('Use a project scoped token when attempting to create '
'a SAML assertion')
raise exception.ForbiddenAction(action=action)
+ subject = token_ref.user_name
+ roles = token_ref.role_names
project = token_ref.project_name
+ # NOTE(rodrigods): the domain name is necessary in order to distinguish
+ # between projects and users with the same name in different domains.
+ project_domain_name = token_ref.project_domain_name
+ subject_domain_name = token_ref.user_domain_name
+
generator = keystone_idp.SAMLGenerator()
- response = generator.samlize_token(issuer, sp_url, subject, roles,
- project)
+ response = generator.samlize_token(
+ issuer, sp_url, subject, subject_domain_name,
+ roles, project, project_domain_name)
+ return (response, service_provider)
+
+ def _build_response_headers(self, service_provider):
+ return [('Content-Type', 'text/xml'),
+ ('X-sp-url', six.binary_type(service_provider['sp_url'])),
+ ('X-auth-url', six.binary_type(service_provider['auth_url']))]
+
+ @validation.validated(schema.saml_create, 'auth')
+ def create_saml_assertion(self, context, auth):
+ """Exchange a scoped token for a SAML assertion.
+
+ :param auth: Dictionary that contains a token and service provider ID
+ :returns: SAML Assertion based on properties from the token
+ """
+ t = self._create_base_saml_assertion(context, auth)
+ (response, service_provider) = t
+
+ headers = self._build_response_headers(service_provider)
return wsgi.render_response(body=response.to_string(),
status=('200', 'OK'),
- headers=[('Content-Type', 'text/xml'),
- ('X-sp-url',
- six.binary_type(sp_url)),
- ('X-auth-url',
- six.binary_type(auth_url))])
+ headers=headers)
+
+ @validation.validated(schema.saml_create, 'auth')
+ def create_ecp_assertion(self, context, auth):
+ """Exchange a scoped token for an ECP assertion.
+
+ :param auth: Dictionary that contains a token and service provider ID
+ :returns: ECP Assertion based on properties from the token
+ """
+
+ t = self._create_base_saml_assertion(context, auth)
+ (saml_assertion, service_provider) = t
+ relay_state_prefix = service_provider.get('relay_state_prefix')
+
+ generator = keystone_idp.ECPGenerator()
+ ecp_assertion = generator.generate_ecp(saml_assertion,
+ relay_state_prefix)
+
+ headers = self._build_response_headers(service_provider)
+ return wsgi.render_response(body=ecp_assertion.to_string(),
+ status=('200', 'OK'),
+ headers=headers)
@dependency.requires('assignment_api', 'resource_api')
@@ -404,15 +456,17 @@ class ServiceProvider(_ControllerBase):
member_name = 'service_provider'
_mutable_parameters = frozenset(['auth_url', 'description', 'enabled',
- 'sp_url'])
+ 'relay_state_prefix', 'sp_url'])
_public_parameters = frozenset(['auth_url', 'id', 'enabled', 'description',
- 'links', 'sp_url'])
+ 'links', 'relay_state_prefix', 'sp_url'])
@controller.protected()
@validation.validated(schema.service_provider_create, 'service_provider')
def create_service_provider(self, context, sp_id, service_provider):
service_provider = self._normalize_dict(service_provider)
service_provider.setdefault('enabled', False)
+ service_provider.setdefault('relay_state_prefix',
+ CONF.saml.relay_state_prefix)
ServiceProvider.check_immutable_params(service_provider)
sp_ref = self.federation_api.create_sp(sp_id, service_provider)
response = ServiceProvider.wrap_member(context, sp_ref)
diff --git a/keystone-moon/keystone/contrib/federation/core.py b/keystone-moon/keystone/contrib/federation/core.py
index b596cff7..2ab75ecb 100644
--- a/keystone-moon/keystone/contrib/federation/core.py
+++ b/keystone-moon/keystone/contrib/federation/core.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Extension supporting Federation."""
+"""Main entry point into the Federation service."""
import abc
@@ -21,6 +21,7 @@ import six
from keystone.common import dependency
from keystone.common import extension
from keystone.common import manager
+from keystone.contrib.federation import utils
from keystone import exception
@@ -41,11 +42,6 @@ EXTENSION_DATA = {
extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
-FEDERATION = 'OS-FEDERATION'
-IDENTITY_PROVIDER = 'OS-FEDERATION:identity_provider'
-PROTOCOL = 'OS-FEDERATION:protocol'
-FEDERATED_DOMAIN_KEYWORD = 'Federated'
-
@dependency.provider('federation_api')
class Manager(manager.Manager):
@@ -55,6 +51,9 @@ class Manager(manager.Manager):
dynamically calls the backend.
"""
+
+ driver_namespace = 'keystone.federation'
+
def __init__(self):
super(Manager, self).__init__(CONF.federation.driver)
@@ -84,6 +83,13 @@ class Manager(manager.Manager):
service_providers = self.driver.get_enabled_service_providers()
return [normalize(sp) for sp in service_providers]
+ def evaluate(self, idp_id, protocol_id, assertion_data):
+ mapping = self.get_mapping_from_idp_and_protocol(idp_id, protocol_id)
+ rules = mapping['rules']
+ rule_processor = utils.RuleProcessor(rules)
+ mapped_properties = rule_processor.process(assertion_data)
+ return mapped_properties, mapping['id']
+
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
diff --git a/keystone-moon/keystone/contrib/federation/idp.py b/keystone-moon/keystone/contrib/federation/idp.py
index bf400135..739fc01a 100644
--- a/keystone-moon/keystone/contrib/federation/idp.py
+++ b/keystone-moon/keystone/contrib/federation/idp.py
@@ -17,17 +17,24 @@ import uuid
from oslo_config import cfg
from oslo_log import log
+from oslo_utils import fileutils
+from oslo_utils import importutils
from oslo_utils import timeutils
import saml2
+from saml2 import client_base
from saml2 import md
+from saml2.profile import ecp
from saml2 import saml
from saml2 import samlp
+from saml2.schema import soapenv
from saml2 import sigver
-import xmldsig
+xmldsig = importutils.try_import("saml2.xmldsig")
+if not xmldsig:
+ xmldsig = importutils.try_import("xmldsig")
+from keystone.common import utils
from keystone import exception
from keystone.i18n import _, _LE
-from keystone.openstack.common import fileutils
LOG = log.getLogger(__name__)
@@ -40,8 +47,8 @@ class SAMLGenerator(object):
def __init__(self):
self.assertion_id = uuid.uuid4().hex
- def samlize_token(self, issuer, recipient, user, roles, project,
- expires_in=None):
+ def samlize_token(self, issuer, recipient, user, user_domain_name, roles,
+ project, project_domain_name, expires_in=None):
"""Convert Keystone attributes to a SAML assertion.
:param issuer: URL of the issuing party
@@ -50,10 +57,14 @@ class SAMLGenerator(object):
:type recipient: string
:param user: User name
:type user: string
+ :param user_domain_name: User Domain name
+ :type user_domain_name: string
:param roles: List of role names
:type roles: list
:param project: Project name
:type project: string
+ :param project_domain_name: Project Domain name
+ :type project_domain_name: string
:param expires_in: Sets how long the assertion is valid for, in seconds
:type expires_in: int
@@ -64,8 +75,8 @@ class SAMLGenerator(object):
status = self._create_status()
saml_issuer = self._create_issuer(issuer)
subject = self._create_subject(user, expiration_time, recipient)
- attribute_statement = self._create_attribute_statement(user, roles,
- project)
+ attribute_statement = self._create_attribute_statement(
+ user, user_domain_name, roles, project, project_domain_name)
authn_statement = self._create_authn_statement(issuer, expiration_time)
signature = self._create_signature()
@@ -84,7 +95,7 @@ class SAMLGenerator(object):
expires_in = CONF.saml.assertion_expiration_time
now = timeutils.utcnow()
future = now + datetime.timedelta(seconds=expires_in)
- return timeutils.isotime(future, subsecond=True)
+ return utils.isotime(future, subsecond=True)
def _create_status(self):
"""Create an object that represents a SAML Status.
@@ -150,58 +161,64 @@ class SAMLGenerator(object):
subject.name_id = name_id
return subject
- def _create_attribute_statement(self, user, roles, project):
+ def _create_attribute_statement(self, user, user_domain_name, roles,
+ project, project_domain_name):
"""Create an object that represents a SAML AttributeStatement.
- <ns0:AttributeStatement
- xmlns:ns0="urn:oasis:names:tc:SAML:2.0:assertion"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <ns0:AttributeStatement>
<ns0:Attribute Name="openstack_user">
<ns0:AttributeValue
xsi:type="xs:string">test_user</ns0:AttributeValue>
</ns0:Attribute>
+ <ns0:Attribute Name="openstack_user_domain">
+ <ns0:AttributeValue
+ xsi:type="xs:string">Default</ns0:AttributeValue>
+ </ns0:Attribute>
<ns0:Attribute Name="openstack_roles">
<ns0:AttributeValue
xsi:type="xs:string">admin</ns0:AttributeValue>
<ns0:AttributeValue
xsi:type="xs:string">member</ns0:AttributeValue>
</ns0:Attribute>
- <ns0:Attribute Name="openstack_projects">
+ <ns0:Attribute Name="openstack_project">
<ns0:AttributeValue
xsi:type="xs:string">development</ns0:AttributeValue>
</ns0:Attribute>
+ <ns0:Attribute Name="openstack_project_domain">
+ <ns0:AttributeValue
+ xsi:type="xs:string">Default</ns0:AttributeValue>
+ </ns0:Attribute>
</ns0:AttributeStatement>
:return: XML <AttributeStatement> object
"""
- openstack_user = 'openstack_user'
- user_attribute = saml.Attribute()
- user_attribute.name = openstack_user
- user_value = saml.AttributeValue()
- user_value.set_text(user)
- user_attribute.attribute_value = user_value
-
- openstack_roles = 'openstack_roles'
- roles_attribute = saml.Attribute()
- roles_attribute.name = openstack_roles
-
- for role in roles:
- role_value = saml.AttributeValue()
- role_value.set_text(role)
- roles_attribute.attribute_value.append(role_value)
-
- openstack_project = 'openstack_project'
- project_attribute = saml.Attribute()
- project_attribute.name = openstack_project
- project_value = saml.AttributeValue()
- project_value.set_text(project)
- project_attribute.attribute_value = project_value
+
+ def _build_attribute(attribute_name, attribute_values):
+ attribute = saml.Attribute()
+ attribute.name = attribute_name
+
+ for value in attribute_values:
+ attribute_value = saml.AttributeValue()
+ attribute_value.set_text(value)
+ attribute.attribute_value.append(attribute_value)
+
+ return attribute
+
+ user_attribute = _build_attribute('openstack_user', [user])
+ roles_attribute = _build_attribute('openstack_roles', roles)
+ project_attribute = _build_attribute('openstack_project', [project])
+ project_domain_attribute = _build_attribute(
+ 'openstack_project_domain', [project_domain_name])
+ user_domain_attribute = _build_attribute(
+ 'openstack_user_domain', [user_domain_name])
attribute_statement = saml.AttributeStatement()
attribute_statement.attribute.append(user_attribute)
attribute_statement.attribute.append(roles_attribute)
attribute_statement.attribute.append(project_attribute)
+ attribute_statement.attribute.append(project_domain_attribute)
+ attribute_statement.attribute.append(user_domain_attribute)
return attribute_statement
def _create_authn_statement(self, issuer, expiration_time):
@@ -224,7 +241,7 @@ class SAMLGenerator(object):
"""
authn_statement = saml.AuthnStatement()
- authn_statement.authn_instant = timeutils.isotime()
+ authn_statement.authn_instant = utils.isotime()
authn_statement.session_index = uuid.uuid4().hex
authn_statement.session_not_on_or_after = expiration_time
@@ -261,7 +278,7 @@ class SAMLGenerator(object):
"""
assertion = saml.Assertion()
assertion.id = self.assertion_id
- assertion.issue_instant = timeutils.isotime()
+ assertion.issue_instant = utils.isotime()
assertion.version = '2.0'
assertion.issuer = issuer
assertion.signature = signature
@@ -289,7 +306,7 @@ class SAMLGenerator(object):
response = samlp.Response()
response.id = uuid.uuid4().hex
response.destination = recipient
- response.issue_instant = timeutils.isotime()
+ response.issue_instant = utils.isotime()
response.version = '2.0'
response.issuer = issuer
response.status = status
@@ -397,6 +414,7 @@ def _sign_assertion(assertion):
command_list = [xmlsec_binary, '--sign', '--privkey-pem', certificates,
'--id-attr:ID', 'Assertion']
+ file_path = None
try:
# NOTE(gyee): need to make the namespace prefixes explicit so
# they won't get reassigned when we wrap the assertion into
@@ -405,15 +423,19 @@ def _sign_assertion(assertion):
nspair={'saml': saml2.NAMESPACE,
'xmldsig': xmldsig.NAMESPACE}))
command_list.append(file_path)
- stdout = subprocess.check_output(command_list)
+ stdout = subprocess.check_output(command_list,
+ stderr=subprocess.STDOUT)
except Exception as e:
msg = _LE('Error when signing assertion, reason: %(reason)s')
msg = msg % {'reason': e}
+ if hasattr(e, 'output'):
+ msg += ' output: %(output)s' % {'output': e.output}
LOG.error(msg)
raise exception.SAMLSigningError(reason=e)
finally:
try:
- os.remove(file_path)
+ if file_path:
+ os.remove(file_path)
except OSError:
pass
@@ -556,3 +578,31 @@ class MetadataGenerator(object):
if value is None:
return False
return True
+
+
+class ECPGenerator(object):
+ """A class for generating an ECP assertion."""
+
+ @staticmethod
+ def generate_ecp(saml_assertion, relay_state_prefix):
+ ecp_generator = ECPGenerator()
+ header = ecp_generator._create_header(relay_state_prefix)
+ body = ecp_generator._create_body(saml_assertion)
+ envelope = soapenv.Envelope(header=header, body=body)
+ return envelope
+
+ def _create_header(self, relay_state_prefix):
+ relay_state_text = relay_state_prefix + uuid.uuid4().hex
+ relay_state = ecp.RelayState(actor=client_base.ACTOR,
+ must_understand='1',
+ text=relay_state_text)
+ header = soapenv.Header()
+ header.extension_elements = (
+ [saml2.element_to_extension_element(relay_state)])
+ return header
+
+ def _create_body(self, saml_assertion):
+ body = soapenv.Body()
+ body.extension_elements = (
+ [saml2.element_to_extension_element(saml_assertion)])
+ return body
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py
index cfb6f2c4..9a4d574b 100644
--- a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py
+++ b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py
@@ -40,12 +40,3 @@ def upgrade(migrate_engine):
mysql_charset='utf8')
federation_protocol_table.create(migrate_engine, checkfirst=True)
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- tables = ['federation_protocol', 'identity_provider']
- for table_name in tables:
- table = sql.Table(table_name, meta, autoload=True)
- table.drop()
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py
index f827f9a9..9a155f5c 100644
--- a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py
+++ b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py
@@ -25,13 +25,3 @@ def upgrade(migrate_engine):
mysql_engine='InnoDB',
mysql_charset='utf8')
mapping_table.create(migrate_engine, checkfirst=True)
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- # Drop previously created tables
- tables = ['mapping']
- for table_name in tables:
- table = sql.Table(table_name, meta, autoload=True)
- table.drop()
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/003_mapping_id_nullable_false.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/003_mapping_id_nullable_false.py
index eb8b2378..1731b0d3 100644
--- a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/003_mapping_id_nullable_false.py
+++ b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/003_mapping_id_nullable_false.py
@@ -27,9 +27,3 @@ def upgrade(migrate_engine):
values(mapping_id=''))
migrate_engine.execute(stmt)
federation_protocol.c.mapping_id.alter(nullable=False)
-
-
-def downgrade(migrate_engine):
- meta = sa.MetaData(bind=migrate_engine)
- federation_protocol = sa.Table('federation_protocol', meta, autoload=True)
- federation_protocol.c.mapping_id.alter(nullable=True)
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/004_add_remote_id_column.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/004_add_remote_id_column.py
index dbe5d1f1..2e0aaf93 100644
--- a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/004_add_remote_id_column.py
+++ b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/004_add_remote_id_column.py
@@ -21,10 +21,3 @@ def upgrade(migrate_engine):
idp_table = utils.get_table(migrate_engine, 'identity_provider')
remote_id = sql.Column('remote_id', sql.String(256), nullable=True)
idp_table.create_column(remote_id)
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- idp_table = utils.get_table(migrate_engine, 'identity_provider')
- idp_table.drop_column('remote_id')
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/005_add_service_provider_table.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/005_add_service_provider_table.py
index bff6a252..1594f893 100644
--- a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/005_add_service_provider_table.py
+++ b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/005_add_service_provider_table.py
@@ -29,10 +29,3 @@ def upgrade(migrate_engine):
mysql_charset='utf8')
sp_table.create(migrate_engine, checkfirst=True)
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- table = sql.Table('service_provider', meta, autoload=True)
- table.drop()
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/006_fixup_service_provider_attributes.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/006_fixup_service_provider_attributes.py
index 8a42ce3a..dc18f548 100644
--- a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/006_fixup_service_provider_attributes.py
+++ b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/006_fixup_service_provider_attributes.py
@@ -38,11 +38,3 @@ def upgrade(migrate_engine):
sp_table.c.auth_url.alter(nullable=False)
sp_table.c.sp_url.alter(nullable=False)
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- sp_table = sql.Table(_SP_TABLE_NAME, meta, autoload=True)
- sp_table.c.auth_url.alter(nullable=True)
- sp_table.c.sp_url.alter(nullable=True)
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/007_add_remote_id_table.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/007_add_remote_id_table.py
new file mode 100644
index 00000000..cd571245
--- /dev/null
+++ b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/007_add_remote_id_table.py
@@ -0,0 +1,41 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as orm
+
+
+def upgrade(migrate_engine):
+ meta = orm.MetaData()
+ meta.bind = migrate_engine
+ idp_table = orm.Table('identity_provider', meta, autoload=True)
+ remote_id_table = orm.Table(
+ 'idp_remote_ids',
+ meta,
+ orm.Column('idp_id',
+ orm.String(64),
+ orm.ForeignKey('identity_provider.id',
+ ondelete='CASCADE')),
+ orm.Column('remote_id',
+ orm.String(255),
+ primary_key=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ remote_id_table.create(migrate_engine, checkfirst=True)
+
+ select = orm.sql.select([idp_table.c.id, idp_table.c.remote_id])
+ for identity in migrate_engine.execute(select):
+ remote_idp_entry = {'idp_id': identity.id,
+ 'remote_id': identity.remote_id}
+ remote_id_table.insert(remote_idp_entry).execute()
+
+ idp_table.drop_column('remote_id')
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/008_add_relay_state_to_sp.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/008_add_relay_state_to_sp.py
new file mode 100644
index 00000000..150dcfed
--- /dev/null
+++ b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/008_add_relay_state_to_sp.py
@@ -0,0 +1,39 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_db.sqlalchemy import utils
+import sqlalchemy as sql
+
+
+CONF = cfg.CONF
+_SP_TABLE_NAME = 'service_provider'
+_RELAY_STATE_PREFIX = 'relay_state_prefix'
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ idp_table = utils.get_table(migrate_engine, _SP_TABLE_NAME)
+ relay_state_prefix_default = CONF.saml.relay_state_prefix
+ relay_state_prefix = sql.Column(_RELAY_STATE_PREFIX, sql.String(256),
+ nullable=False,
+ server_default=relay_state_prefix_default)
+ idp_table.create_column(relay_state_prefix)
+
+
+def downgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+ idp_table = utils.get_table(migrate_engine, _SP_TABLE_NAME)
+ idp_table.drop_column(_RELAY_STATE_PREFIX)
diff --git a/keystone-moon/keystone/contrib/federation/routers.py b/keystone-moon/keystone/contrib/federation/routers.py
index 9a6224b7..d8fa8175 100644
--- a/keystone-moon/keystone/contrib/federation/routers.py
+++ b/keystone-moon/keystone/contrib/federation/routers.py
@@ -36,44 +36,45 @@ class FederationExtension(wsgi.V3ExtensionRouter):
The API looks like::
- PUT /OS-FEDERATION/identity_providers/$identity_provider
+ PUT /OS-FEDERATION/identity_providers/{idp_id}
GET /OS-FEDERATION/identity_providers
- GET /OS-FEDERATION/identity_providers/$identity_provider
- DELETE /OS-FEDERATION/identity_providers/$identity_provider
- PATCH /OS-FEDERATION/identity_providers/$identity_provider
+ GET /OS-FEDERATION/identity_providers/{idp_id}
+ DELETE /OS-FEDERATION/identity_providers/{idp_id}
+ PATCH /OS-FEDERATION/identity_providers/{idp_id}
PUT /OS-FEDERATION/identity_providers/
- $identity_provider/protocols/$protocol
+ {idp_id}/protocols/{protocol_id}
GET /OS-FEDERATION/identity_providers/
- $identity_provider/protocols
+ {idp_id}/protocols
GET /OS-FEDERATION/identity_providers/
- $identity_provider/protocols/$protocol
+ {idp_id}/protocols/{protocol_id}
PATCH /OS-FEDERATION/identity_providers/
- $identity_provider/protocols/$protocol
+ {idp_id}/protocols/{protocol_id}
DELETE /OS-FEDERATION/identity_providers/
- $identity_provider/protocols/$protocol
+ {idp_id}/protocols/{protocol_id}
PUT /OS-FEDERATION/mappings
GET /OS-FEDERATION/mappings
- PATCH /OS-FEDERATION/mappings/$mapping_id
- GET /OS-FEDERATION/mappings/$mapping_id
- DELETE /OS-FEDERATION/mappings/$mapping_id
+ PATCH /OS-FEDERATION/mappings/{mapping_id}
+ GET /OS-FEDERATION/mappings/{mapping_id}
+ DELETE /OS-FEDERATION/mappings/{mapping_id}
GET /OS-FEDERATION/projects
GET /OS-FEDERATION/domains
- PUT /OS-FEDERATION/service_providers/$service_provider
+ PUT /OS-FEDERATION/service_providers/{sp_id}
GET /OS-FEDERATION/service_providers
- GET /OS-FEDERATION/service_providers/$service_provider
- DELETE /OS-FEDERATION/service_providers/$service_provider
- PATCH /OS-FEDERATION/service_providers/$service_provider
+ GET /OS-FEDERATION/service_providers/{sp_id}
+ DELETE /OS-FEDERATION/service_providers/{sp_id}
+ PATCH /OS-FEDERATION/service_providers/{sp_id}
- GET /OS-FEDERATION/identity_providers/$identity_provider/
- protocols/$protocol/auth
- POST /OS-FEDERATION/identity_providers/$identity_provider/
- protocols/$protocol/auth
+ GET /OS-FEDERATION/identity_providers/{identity_provider}/
+ protocols/{protocol}/auth
+ POST /OS-FEDERATION/identity_providers/{identity_provider}/
+ protocols/{protocol}/auth
POST /auth/OS-FEDERATION/saml2
+ POST /auth/OS-FEDERATION/saml2/ecp
GET /OS-FEDERATION/saml2/metadata
GET /auth/OS-FEDERATION/websso/{protocol_id}
@@ -191,6 +192,8 @@ class FederationExtension(wsgi.V3ExtensionRouter):
path=self._construct_url('projects'),
get_action='list_projects_for_groups',
rel=build_resource_relation(resource_name='projects'))
+
+ # Auth operations
self._add_resource(
mapper, auth_controller,
path=self._construct_url('identity_providers/{identity_provider}/'
@@ -202,8 +205,6 @@ class FederationExtension(wsgi.V3ExtensionRouter):
'identity_provider': IDP_ID_PARAMETER_RELATION,
'protocol': PROTOCOL_ID_PARAMETER_RELATION,
})
-
- # Auth operations
self._add_resource(
mapper, auth_controller,
path='/auth' + self._construct_url('saml2'),
@@ -211,6 +212,11 @@ class FederationExtension(wsgi.V3ExtensionRouter):
rel=build_resource_relation(resource_name='saml2'))
self._add_resource(
mapper, auth_controller,
+ path='/auth' + self._construct_url('saml2/ecp'),
+ post_action='create_ecp_assertion',
+ rel=build_resource_relation(resource_name='ecp'))
+ self._add_resource(
+ mapper, auth_controller,
path='/auth' + self._construct_url('websso/{protocol_id}'),
get_post_action='federated_sso_auth',
rel=build_resource_relation(resource_name='websso'),
diff --git a/keystone-moon/keystone/contrib/federation/schema.py b/keystone-moon/keystone/contrib/federation/schema.py
index 645e1129..17818a98 100644
--- a/keystone-moon/keystone/contrib/federation/schema.py
+++ b/keystone-moon/keystone/contrib/federation/schema.py
@@ -58,7 +58,8 @@ _service_provider_properties = {
'auth_url': parameter_types.url,
'sp_url': parameter_types.url,
'description': validation.nullable(parameter_types.description),
- 'enabled': parameter_types.boolean
+ 'enabled': parameter_types.boolean,
+ 'relay_state_prefix': validation.nullable(parameter_types.description)
}
service_provider_create = {
diff --git a/keystone-moon/keystone/contrib/federation/utils.py b/keystone-moon/keystone/contrib/federation/utils.py
index 939fe9a0..b0db3cdd 100644
--- a/keystone-moon/keystone/contrib/federation/utils.py
+++ b/keystone-moon/keystone/contrib/federation/utils.py
@@ -21,7 +21,6 @@ from oslo_log import log
from oslo_utils import timeutils
import six
-from keystone.contrib import federation
from keystone import exception
from keystone.i18n import _, _LW
@@ -191,14 +190,37 @@ def validate_groups_cardinality(group_ids, mapping_id):
raise exception.MissingGroups(mapping_id=mapping_id)
-def validate_idp(idp, assertion):
- """Check if the IdP providing the assertion is the one registered for
- the mapping
+def get_remote_id_parameter(protocol):
+ # NOTE(marco-fargetta): Since we support any protocol ID, we attempt to
+ # retrieve the remote_id_attribute of the protocol ID. If it's not
+ # registered in the config, then register the option and try again.
+ # This allows the user to register protocols other than oidc and saml2.
+ remote_id_parameter = None
+ try:
+ remote_id_parameter = CONF[protocol]['remote_id_attribute']
+ except AttributeError:
+ CONF.register_opt(cfg.StrOpt('remote_id_attribute'),
+ group=protocol)
+ try:
+ remote_id_parameter = CONF[protocol]['remote_id_attribute']
+ except AttributeError:
+ pass
+ if not remote_id_parameter:
+ LOG.debug('Cannot find "remote_id_attribute" in configuration '
+ 'group %s. Trying default location in '
+ 'group federation.', protocol)
+ remote_id_parameter = CONF.federation.remote_id_attribute
+
+ return remote_id_parameter
+
+
+def validate_idp(idp, protocol, assertion):
+ """Validate the IdP providing the assertion is registered for the mapping.
"""
- remote_id_parameter = CONF.federation.remote_id_attribute
- if not remote_id_parameter or not idp['remote_id']:
- LOG.warning(_LW('Impossible to identify the IdP %s '),
- idp['id'])
+
+ remote_id_parameter = get_remote_id_parameter(protocol)
+ if not remote_id_parameter or not idp['remote_ids']:
+ LOG.debug('Impossible to identify the IdP %s ', idp['id'])
# If nothing is defined, the administrator may want to
# allow the mapping of every IdP
return
@@ -206,10 +228,9 @@ def validate_idp(idp, assertion):
idp_remote_identifier = assertion[remote_id_parameter]
except KeyError:
msg = _('Could not find Identity Provider identifier in '
- 'environment, check [federation] remote_id_attribute '
- 'for details.')
+ 'environment')
raise exception.ValidationError(msg)
- if idp_remote_identifier != idp['remote_id']:
+ if idp_remote_identifier not in idp['remote_ids']:
msg = _('Incoming identity provider identifier not included '
'among the accepted identifiers.')
raise exception.Forbidden(msg)
@@ -265,7 +286,7 @@ def validate_groups(group_ids, mapping_id, identity_api):
# TODO(marek-denis): Optimize this function, so the number of calls to the
# backend are minimized.
def transform_to_group_ids(group_names, mapping_id,
- identity_api, assignment_api):
+ identity_api, resource_api):
"""Transform groups identitified by name/domain to their ids
Function accepts list of groups identified by a name and domain giving
@@ -296,7 +317,7 @@ def transform_to_group_ids(group_names, mapping_id,
:type mapping_id: str
:param identity_api: identity_api object
- :param assignment_api: assignment_api object
+ :param resource_api: resource manager object
:returns: generator object with group ids
@@ -317,7 +338,7 @@ def transform_to_group_ids(group_names, mapping_id,
"""
domain_id = (domain.get('id') or
- assignment_api.get_domain_by_name(
+ resource_api.get_domain_by_name(
domain.get('name')).get('id'))
return domain_id
@@ -334,7 +355,7 @@ def transform_to_group_ids(group_names, mapping_id,
def get_assertion_params_from_env(context):
LOG.debug('Environment variables: %s', context['environment'])
prefix = CONF.federation.assertion_prefix
- for k, v in context['environment'].items():
+ for k, v in list(context['environment'].items()):
if k.startswith(prefix):
yield (k, v)
@@ -487,8 +508,8 @@ class RuleProcessor(object):
"""
def extract_groups(groups_by_domain):
- for groups in groups_by_domain.values():
- for group in {g['name']: g for g in groups}.values():
+ for groups in list(groups_by_domain.values()):
+ for group in list({g['name']: g for g in groups}.values()):
yield group
def normalize_user(user):
@@ -506,8 +527,7 @@ class RuleProcessor(object):
if user_type == UserType.EPHEMERAL:
user['domain'] = {
- 'id': (CONF.federation.federated_domain_name or
- federation.FEDERATED_DOMAIN_KEYWORD)
+ 'id': CONF.federation.federated_domain_name
}
# initialize the group_ids as a set to eliminate duplicates
@@ -586,7 +606,7 @@ class RuleProcessor(object):
LOG.debug('direct_maps: %s', direct_maps)
LOG.debug('local: %s', local)
new = {}
- for k, v in six.iteritems(local):
+ for k, v in local.items():
if isinstance(v, dict):
new_value = self._update_local_mapping(v, direct_maps)
else:
@@ -644,7 +664,7 @@ class RuleProcessor(object):
}
:returns: identity values used to update local
- :rtype: keystone.contrib.federation.utils.DirectMaps
+ :rtype: keystone.contrib.federation.utils.DirectMaps or None
"""
@@ -686,10 +706,10 @@ class RuleProcessor(object):
# If a blacklist or whitelist is used, we want to map to the
# whole list instead of just its values separately.
- if blacklisted_values:
+ if blacklisted_values is not None:
direct_map_values = [v for v in direct_map_values
if v not in blacklisted_values]
- elif whitelisted_values:
+ elif whitelisted_values is not None:
direct_map_values = [v for v in direct_map_values
if v in whitelisted_values]
diff --git a/keystone-moon/keystone/contrib/moon/algorithms.py b/keystone-moon/keystone/contrib/moon/algorithms.py
index 8644e02d..30305fc1 100644
--- a/keystone-moon/keystone/contrib/moon/algorithms.py
+++ b/keystone-moon/keystone/contrib/moon/algorithms.py
@@ -22,18 +22,19 @@ sub_meta_rule_dict = {
}
rule_dict = [
- ["high", "vm_admin", "medium"],
- ["high", "vm_admin", "low"],
- ["medium", "vm_admin", "low"],
- ["high", "vm_access", "high"],
- ["high", "vm_access", "medium"],
- ["high", "vm_access", "low"],
- ["medium", "vm_access", "medium"],
- ["medium", "vm_access", "low"],
- ["low", "vm_access", "low"]
+ ["high", "vm_admin", "medium", True],
+ ["high", "vm_admin", "low", True],
+ ["medium", "vm_admin", "low", True],
+ ["high", "vm_access", "high", True],
+ ["high", "vm_access", "medium", True],
+ ["high", "vm_access", "low", True],
+ ["medium", "vm_access", "medium", True],
+ ["medium", "vm_access", "low", True],
+ ["low", "vm_access", "low", True]
]
"""
+
def inclusion(authz_buffer, sub_meta_rule_dict, rule_list):
_cat = []
for subject_cat in sub_meta_rule_dict['subject_categories']:
@@ -46,14 +47,10 @@ def inclusion(authz_buffer, sub_meta_rule_dict, rule_list):
if object_cat in authz_buffer['object_assignments']:
_cat.append(authz_buffer['object_assignments'][object_cat])
- print("authz_buffer", authz_buffer)
- print("rule_list", rule_list)
- print("_cat", _cat)
for _element in itertools.product(*_cat):
# Add the boolean at the end
_element = list(_element)
_element.append(True)
- print("_element", _element)
if _element in rule_list:
return True
@@ -66,6 +63,13 @@ def comparison(authz_buffer, sub_meta_rule_dict, rule_list):
def all_true(decision_buffer):
for _rule in decision_buffer:
- if decision_buffer[_rule] is False:
+ if decision_buffer[_rule] == False:
return False
- return True \ No newline at end of file
+ return True
+
+
+def one_true(decision_buffer):
+ for _rule in decision_buffer:
+ if decision_buffer[_rule] == True:
+ return True
+ return False
diff --git a/keystone-moon/keystone/contrib/moon/backends/memory.py b/keystone-moon/keystone/contrib/moon/backends/memory.py
index 675240e5..7a996847 100644
--- a/keystone-moon/keystone/contrib/moon/backends/memory.py
+++ b/keystone-moon/keystone/contrib/moon/backends/memory.py
@@ -6,6 +6,7 @@
from uuid import uuid4
from glob import glob
import os
+import json
from keystone import config
from keystone.contrib.moon.core import ConfigurationDriver
@@ -19,12 +20,12 @@ class ConfigurationConnector(ConfigurationDriver):
super(ConfigurationConnector, self).__init__()
self.aggregation_algorithms_dict = dict()
self.aggregation_algorithms_dict[uuid4().hex] = {'name': 'all_true', 'description': 'all_true'}
+ self.aggregation_algorithms_dict[uuid4().hex] = {'name': 'one_true', 'description': 'one_true'}
self.sub_meta_rule_algorithms_dict = dict()
self.sub_meta_rule_algorithms_dict[uuid4().hex] = {'name': 'inclusion', 'description': 'inclusion'}
self.sub_meta_rule_algorithms_dict[uuid4().hex] = {'name': 'comparison', 'description': 'comparison'}
def get_policy_templates_dict(self):
- # TODO (dthom): this function should return a dictionary of all policy templates as:
"""
:return: {
template_id1: {name: template_name, description: template_description},
@@ -33,11 +34,15 @@ class ConfigurationConnector(ConfigurationDriver):
}
"""
nodes = glob(os.path.join(CONF.moon.policy_directory, "*"))
- return {
- "authz_templates": [os.path.basename(n) for n in nodes if os.path.isdir(n)]
- }
-
- def get_aggregation_algorithm_dict(self):
+ templates = dict()
+ for node in nodes:
+ templates[os.path.basename(node)] = dict()
+ metadata = json.load(open(os.path.join(node, "metadata.json")))
+ templates[os.path.basename(node)]["name"] = metadata["name"]
+ templates[os.path.basename(node)]["description"] = metadata["description"]
+ return templates
+
+ def get_aggregation_algorithms_dict(self):
return self.aggregation_algorithms_dict
def get_sub_meta_rule_algorithms_dict(self):
diff --git a/keystone-moon/keystone/contrib/moon/backends/sql.py b/keystone-moon/keystone/contrib/moon/backends/sql.py
index 7a75af39..cb64c1f7 100644
--- a/keystone-moon/keystone/contrib/moon/backends/sql.py
+++ b/keystone-moon/keystone/contrib/moon/backends/sql.py
@@ -887,7 +887,7 @@ class IntraExtensionConnector(IntraExtensionDriver):
def set_aggregation_algorithm_dict(self, intra_extension_id, aggregation_algorithm_id, aggregation_algorithm_dict):
with sql.transaction() as session:
query = session.query(AggregationAlgorithm)
- query = query.filter_by(intra_extension_id=intra_extension_id, id=aggregation_algorithm_id)
+ query = query.filter_by(intra_extension_id=intra_extension_id)
ref = query.first()
new_ref = AggregationAlgorithm.from_dict(
{
@@ -896,21 +896,18 @@ class IntraExtensionConnector(IntraExtensionDriver):
'intra_extension_id': intra_extension_id
}
)
- if not ref:
- session.add(new_ref)
- else:
- for attr in AggregationAlgorithm.attributes:
- if attr != 'id':
- setattr(ref, attr, getattr(new_ref, attr))
+ if ref:
+ session.delete(ref)
+ session.add(new_ref)
session.flush()
return self.get_aggregation_algorithm_dict(intra_extension_id)
- # def del_aggregation_algorithm(self, intra_extension_id, aggregation_algorithm_id):
- # with sql.transaction() as session:
- # query = session.query(AggregationAlgorithm)
- # query = query.filter_by(intra_extension_id=intra_extension_id, id=aggregation_algorithm_id)
- # ref = query.first()
- # session.delete(ref)
+ def del_aggregation_algorithm(self, intra_extension_id, aggregation_algorithm_id):
+ with sql.transaction() as session:
+ query = session.query(AggregationAlgorithm)
+ query = query.filter_by(intra_extension_id=intra_extension_id, id=aggregation_algorithm_id)
+ ref = query.first()
+ session.delete(ref)
# Getter and Setter for sub_meta_rule
diff --git a/keystone-moon/keystone/contrib/moon/core.py b/keystone-moon/keystone/contrib/moon/core.py
index d82c9fcc..4a68cdaa 100644
--- a/keystone-moon/keystone/contrib/moon/core.py
+++ b/keystone-moon/keystone/contrib/moon/core.py
@@ -25,9 +25,9 @@ from keystone.contrib.moon.algorithms import *
CONF = config.CONF
LOG = log.getLogger(__name__)
-ADMIN_ID = None # default user_id for internal invocation
-ROOT_EXTENSION_ID = None
-ROOT_EXTENSION_MODEL = "policy_root"
+# ADMIN_ID = None # default user_id for internal invocation
+# ROOT_EXTENSION_ID = None
+# ROOT_EXTENSION_MODEL = "policy_root"
_OPTS = [
@@ -52,9 +52,9 @@ _OPTS = [
cfg.StrOpt('policy_directory',
default='/etc/keystone/policies',
help='Local directory where all policies are stored.'),
- cfg.StrOpt('super_extension_directory',
- default='/etc/keystone/super_extension',
- help='Local directory where SuperExtension configuration is stored.'),
+ cfg.StrOpt('root_policy_directory',
+ default='policy_root',
+ help='Local directory where Root IntraExtension configuration is stored.'),
]
CONF.register_opts(_OPTS, group='moon')
@@ -108,29 +108,29 @@ def enforce(action_names, object_name, **extra):
_action_name_list = action_names
_object_name = object_name
- def get_root_extension(self, args, kwargs):
- if not ROOT_EXTENSION_ID:
- global ROOT_EXTENSION_MODEL, ROOT_EXTENSION_ID, ADMIN_ID
- try:
- # if it is the first time we passed here, the root extension may be not initialized
- # specially during unittest. So we raise RootExtensionNotInitialized to authorize the
- # current creation process
- if 'intra_extension_dict' in kwargs:
- intra_extension_dict = kwargs['intra_extension_dict']
- else:
- intra_extension_dict = args[2]
- if isinstance(intra_extension_dict, dict) and \
- "model" in intra_extension_dict and \
- intra_extension_dict["model"] == "policy_root":
- raise RootExtensionNotInitialized()
- except KeyError:
- pass
- return ROOT_EXTENSION_ID
+ # def get_root_extension(self, args, kwargs):
+ # if not ROOT_EXTENSION_ID:
+ # global ROOT_EXTENSION_MODEL, ROOT_EXTENSION_ID, ADMIN_ID
+ # try:
+ # # if it is the first time we passed here, the root extension may be not initialized
+ # # specially during unittest. So we raise RootExtensionNotInitialized to authorize the
+ # # current creation process
+ # if 'intra_extension_dict' in kwargs:
+ # intra_extension_dict = kwargs['intra_extension_dict']
+ # else:
+ # intra_extension_dict = args[2]
+ # if isinstance(intra_extension_dict, dict) and \
+ # "model" in intra_extension_dict and \
+ # intra_extension_dict["model"] == "policy_root":
+ # raise RootExtensionNotInitialized()
+ # except KeyError:
+ # pass
+ # return ROOT_EXTENSION_ID
def wrap(func):
def wrapped(*args, **kwargs):
- global ADMIN_ID, ROOT_EXTENSION_ID
+ # global ADMIN_ID, ROOT_EXTENSION_ID
returned_value_for_func = None
self = args[0]
try:
@@ -140,46 +140,42 @@ def enforce(action_names, object_name, **extra):
intra_extension_id = None
intra_admin_extension_id = None
- try:
- intra_root_extension_id = get_root_extension(self, args, kwargs)
- # FIXME (asteroide): intra_root_extension_id is not used at all...
- except RootExtensionNotInitialized:
- # Root extension is not initialized, the current requested function must be the creation
- # of this root extension
- returned_value_for_func = func(*args, **kwargs)
- # after the creation, we must update ROOT_EXTENSION_ID and ADMIN_ID
- intra_extensions_dict = self.admin_api.driver.get_intra_extensions_dict()
- for ext in intra_extensions_dict:
- if intra_extensions_dict[ext]["model"] == ROOT_EXTENSION_MODEL:
- ROOT_EXTENSION_ID = ext
- break
- if not ROOT_EXTENSION_ID:
- raise RootExtensionUnknown()
- subjects_dict = self.admin_api.driver.get_subjects_dict(returned_value_for_func['id'])
- for subject_id in subjects_dict:
- if subjects_dict[subject_id]["name"] == "admin":
- ADMIN_ID = subject_id
- break
- if not ADMIN_ID:
- raise RootExtensionUnknown()
- # if all is OK, return values from func (creation of the root extension)
- return returned_value_for_func
+ # try:
+ intra_root_extension_id = self.root_api.get_root_extension_id()
+ # except RootExtensionNotInitialized:
+ # # Root extension is not initialized, the current requested function must be the creation
+ # # of this root extension
+ # returned_value_for_func = func(*args, **kwargs)
+ # # after the creation, we must update ROOT_EXTENSION_ID and ADMIN_ID
+ # intra_extensions_dict = self.admin_api.driver.get_intra_extensions_dict()
+ # for ext in intra_extensions_dict:
+ # if intra_extensions_dict[ext]["model"] == ROOT_EXTENSION_MODEL:
+ # ROOT_EXTENSION_ID = ext
+ # break
+ # if not ROOT_EXTENSION_ID:
+ # raise RootExtensionUnknown()
+ # subjects_dict = self.admin_api.driver.get_subjects_dict(returned_value_for_func['id'])
+ # for subject_id in subjects_dict:
+ # if subjects_dict[subject_id]["name"] == "admin":
+ # ADMIN_ID = subject_id
+ # break
+ # if not ADMIN_ID:
+ # raise RootExtensionUnknown()
+ # # if all is OK, return values from func (creation of the root extension)
+ # return returned_value_for_func
try:
intra_extension_id = args[2]
except IndexError:
- print("IndexError", kwargs)
if 'intra_extension_id' in kwargs:
intra_extension_id = kwargs['intra_extension_id']
else:
- print("in else", intra_root_extension_id)
intra_extension_id = intra_root_extension_id
- if ADMIN_ID and user_id == ADMIN_ID:
+ if user_id == self.root_api.get_root_admin_id():
# TODO: check if there is no security hole here
returned_value_for_func = func(*args, **kwargs)
else:
intra_extensions_dict = self.admin_api.driver.get_intra_extensions_dict()
- print(intra_extension_id, intra_extensions_dict)
if intra_extension_id not in intra_extensions_dict:
raise IntraExtensionUnknown()
tenants_dict = self.tenant_api.driver.get_tenants_dict()
@@ -213,7 +209,10 @@ def enforce(action_names, object_name, **extra):
# if we found the object in intra_root_extension_id, so we change the intra_admin_extension_id
# into intra_root_extension_id and we modify the ID of the subject
subjects_dict = self.admin_api.driver.get_subjects_dict(intra_admin_extension_id)
- subject_name = subjects_dict[user_id]["name"]
+ try:
+ subject_name = subjects_dict[user_id]["name"]
+ except KeyError:
+ raise SubjectUnknown()
intra_admin_extension_id = intra_root_extension_id
subjects_dict = self.admin_api.driver.get_subjects_dict(intra_admin_extension_id)
user_id = None
@@ -221,7 +220,7 @@ def enforce(action_names, object_name, **extra):
if subjects_dict[_subject_id]["name"] == subject_name:
user_id = _subject_id
if not user_id:
- raise SubjectUnknown("Subject Unknown for Root intraExtension...")
+ raise SubjectUnknown("Subject {} Unknown for Root IntraExtension...".format(subject_name))
if type(_action_name_list) in (str, unicode):
action_name_list = (_action_name_list, )
else:
@@ -256,9 +255,11 @@ def enforce(action_names, object_name, **extra):
@dependency.provider('configuration_api')
-@dependency.requires('moonlog_api', 'admin_api', 'tenant_api')
+@dependency.requires('moonlog_api', 'admin_api', 'tenant_api', 'root_api')
class ConfigurationManager(manager.Manager):
+ driver_namespace = 'keystone.moon.configuration'
+
def __init__(self):
super(ConfigurationManager, self).__init__(CONF.moon.configuration_driver)
@@ -278,7 +279,7 @@ class ConfigurationManager(manager.Manager):
def get_policy_template_id_from_name(self, user_id, policy_template_name):
policy_templates_dict = self.driver.get_policy_templates_dict()
for policy_template_id in policy_templates_dict:
- if policy_templates_dict[policy_template_id]['name'] is policy_template_name:
+ if policy_templates_dict[policy_template_id]['name'] == policy_template_name:
return policy_template_id
return None
@@ -298,7 +299,7 @@ class ConfigurationManager(manager.Manager):
def get_aggregation_algorithm_id_from_name(self, user_id, aggregation_algorithm_name):
aggregation_algorithms_dict = self.driver.get_aggregation_algorithms_dict()
for aggregation_algorithm_id in aggregation_algorithms_dict:
- if aggregation_algorithms_dict[aggregation_algorithm_id]['name'] is aggregation_algorithm_name:
+ if aggregation_algorithms_dict[aggregation_algorithm_id]['name'] == aggregation_algorithm_name:
return aggregation_algorithm_id
return None
@@ -318,15 +319,17 @@ class ConfigurationManager(manager.Manager):
def get_sub_meta_rule_algorithm_id_from_name(self, sub_meta_rule_algorithm_name):
sub_meta_rule_algorithms_dict = self.driver.get_sub_meta_rule_algorithms_dict()
for sub_meta_rule_algorithm_id in sub_meta_rule_algorithms_dict:
- if sub_meta_rule_algorithms_dict[sub_meta_rule_algorithm_id]['name'] is sub_meta_rule_algorithm_name:
+ if sub_meta_rule_algorithms_dict[sub_meta_rule_algorithm_id]['name'] == sub_meta_rule_algorithm_name:
return sub_meta_rule_algorithm_id
return None
@dependency.provider('tenant_api')
-@dependency.requires('moonlog_api', 'admin_api', 'configuration_api')
+@dependency.requires('moonlog_api', 'admin_api', 'configuration_api', 'root_api', 'resource_api')
class TenantManager(manager.Manager):
+ driver_namespace = 'keystone.moon.tenant'
+
def __init__(self):
super(TenantManager, self).__init__(CONF.moon.tenant_driver)
@@ -348,38 +351,66 @@ class TenantManager(manager.Manager):
"""
return self.driver.get_tenants_dict()
+ def __get_keystone_tenant_dict(self, tenant_id="", tenant_name=""):
+ tenants = self.resource_api.list_projects()
+ for tenant in tenants:
+ if tenant_id and tenant_id == tenant['id']:
+ return tenant
+ if tenant_name and tenant_name == tenant['name']:
+ return tenant
+ if not tenant_id:
+ tenant_id = uuid4().hex
+ if not tenant_name:
+ tenant_name = tenant_id
+ tenant = {
+ "id": tenant_id,
+ "name": tenant_name,
+ "description": "Auto generated tenant from Moon platform",
+ "enabled": True,
+ "domain_id": "default"
+ }
+ keystone_tenant = self.resource_api.create_project(tenant["id"], tenant)
+ return keystone_tenant
+
@filter_input
@enforce(("read", "write"), "tenants")
def add_tenant_dict(self, user_id, tenant_dict):
tenants_dict = self.driver.get_tenants_dict()
for tenant_id in tenants_dict:
- if tenants_dict[tenant_id]['name'] is tenant_dict['name']:
+ if tenants_dict[tenant_id]['name'] == tenant_dict['name']:
raise TenantAddedNameExisting()
+ # Check (and eventually sync) Keystone tenant
+ if 'id' not in tenant_dict:
+ tenant_dict['id'] = None
+ keystone_tenant = self.__get_keystone_tenant_dict(tenant_dict['id'], tenant_dict['name'])
+ tenant_dict.update(keystone_tenant)
# Sync users between intra_authz_extension and intra_admin_extension
if tenant_dict['intra_admin_extension_id']:
if not tenant_dict['intra_authz_extension_id']:
raise TenantNoIntraAuthzExtension()
- authz_subjects_dict = self.admin_api.get_subjects_dict(ADMIN_ID, tenant_dict['intra_authz_extension_id'])
- admin_subjects_dict = self.admin_api.get_subjects_dict(ADMIN_ID, tenant_dict['intra_admin_extension_id'])
- for _subject_id in authz_subjects_dict:
- if _subject_id not in admin_subjects_dict:
- self.admin_api.add_subject_dict(ADMIN_ID, tenant_dict['intra_admin_extension_id'], authz_subjects_dict[_subject_id])
- for _subject_id in admin_subjects_dict:
- if _subject_id not in authz_subjects_dict:
- self.admin_api.add_subject_dict(ADMIN_ID, tenant_dict['intra_authz_extension_id'], admin_subjects_dict[_subject_id])
-
- # TODO (dthom): check whether we can replace the below code by the above one
- # authz_subjects_dict = self.admin_api.get_subjects_dict(ADMIN_ID, tenant_dict['intra_authz_extension_id'])
- # authz_subject_names_list = [authz_subjects_dict[subject_id]["name"] for subject_id in authz_subjects_dict]
- # admin_subjects_dict = self.admin_api.get_subjects_dict(ADMIN_ID, tenant_dict['intra_admin_extension_id'])
- # admin_subject_names_list = [admin_subjects_dict[subject_id]["name"] for subject_id in admin_subjects_dict]
+ # authz_subjects_dict = self.admin_api.get_subjects_dict(self.root_api.get_root_admin_id(), tenant_dict['intra_authz_extension_id'])
+ # admin_subjects_dict = self.admin_api.get_subjects_dict(self.root_api.get_root_admin_id(), tenant_dict['intra_admin_extension_id'])
# for _subject_id in authz_subjects_dict:
- # if authz_subjects_dict[_subject_id]["name"] not in admin_subject_names_list:
- # self.admin_api.add_subject_dict(ADMIN_ID, tenant_dict['intra_admin_extension_id'], authz_subjects_dict[_subject_id])
+ # if _subject_id not in admin_subjects_dict:
+ # self.admin_api.add_subject_dict(self.root_api.get_root_admin_id(), tenant_dict['intra_admin_extension_id'], authz_subjects_dict[_subject_id])
# for _subject_id in admin_subjects_dict:
- # if admin_subjects_dict[_subject_id]["name"] not in authz_subject_names_list:
- # self.admin_api.add_subject_dict(ADMIN_ID, tenant_dict['intra_authz_extension_id'], admin_subjects_dict[_subject_id])
+ # if _subject_id not in authz_subjects_dict:
+ # self.admin_api.add_subject_dict(self.root_api.get_root_admin_id(), tenant_dict['intra_authz_extension_id'], admin_subjects_dict[_subject_id])
+
+            # TODO (asteroide): check whether we can replace the below code by the above one
+            # NOTE (asteroide): at first glance: no, subject_id changes depending on which intra_extension is used;
+            # we must use the name, which is constant.
+ authz_subjects_dict = self.admin_api.get_subjects_dict(self.root_api.get_root_admin_id(), tenant_dict['intra_authz_extension_id'])
+ authz_subject_names_list = [authz_subjects_dict[subject_id]["name"] for subject_id in authz_subjects_dict]
+ admin_subjects_dict = self.admin_api.get_subjects_dict(self.root_api.get_root_admin_id(), tenant_dict['intra_admin_extension_id'])
+ admin_subject_names_list = [admin_subjects_dict[subject_id]["name"] for subject_id in admin_subjects_dict]
+ for _subject_id in authz_subjects_dict:
+ if authz_subjects_dict[_subject_id]["name"] not in admin_subject_names_list:
+ self.admin_api.add_subject_dict(self.root_api.get_root_admin_id(), tenant_dict['intra_admin_extension_id'], authz_subjects_dict[_subject_id])
+ for _subject_id in admin_subjects_dict:
+ if admin_subjects_dict[_subject_id]["name"] not in authz_subject_names_list:
+ self.admin_api.add_subject_dict(self.root_api.get_root_admin_id(), tenant_dict['intra_authz_extension_id'], admin_subjects_dict[_subject_id])
return self.driver.add_tenant_dict(tenant_dict['id'], tenant_dict)
@@ -409,52 +440,24 @@ class TenantManager(manager.Manager):
if tenant_dict['intra_admin_extension_id']:
if not tenant_dict['intra_authz_extension_id']:
raise TenantNoIntraAuthzExtension
- authz_subjects_dict = self.admin_api.get_subjects_dict(ADMIN_ID, tenant_dict['intra_authz_extension_id'])
- admin_subjects_dict = self.admin_api.get_subjects_dict(ADMIN_ID, tenant_dict['intra_admin_extension_id'])
+ authz_subjects_dict = self.admin_api.get_subjects_dict(self.root_api.get_root_admin_id(), tenant_dict['intra_authz_extension_id'])
+ authz_subject_names_list = [authz_subjects_dict[subject_id]["name"] for subject_id in authz_subjects_dict]
+ admin_subjects_dict = self.admin_api.get_subjects_dict(self.root_api.get_root_admin_id(), tenant_dict['intra_admin_extension_id'])
+ admin_subject_names_list = [admin_subjects_dict[subject_id]["name"] for subject_id in admin_subjects_dict]
for _subject_id in authz_subjects_dict:
- if _subject_id not in admin_subjects_dict:
- self.admin_api.add_subject_dict(ADMIN_ID, tenant_dict['intra_admin_extension_id'], authz_subjects_dict[_subject_id])
+ if authz_subjects_dict[_subject_id]["name"] not in admin_subject_names_list:
+ self.admin_api.add_subject_dict(self.root_api.get_root_admin_id(), tenant_dict['intra_admin_extension_id'], authz_subjects_dict[_subject_id])
for _subject_id in admin_subjects_dict:
- if _subject_id not in authz_subjects_dict:
- self.admin_api.add_subject_dict(ADMIN_ID, tenant_dict['intra_authz_extension_id'], admin_subjects_dict[_subject_id])
+ if admin_subjects_dict[_subject_id]["name"] not in authz_subject_names_list:
+ self.admin_api.add_subject_dict(self.root_api.get_root_admin_id(), tenant_dict['intra_authz_extension_id'], admin_subjects_dict[_subject_id])
return self.driver.set_tenant_dict(tenant_id, tenant_dict)
- # TODO (dthom): move the following 2 functions to perimeter functions
- @filter_input
- def get_subject_dict_from_keystone_id(self, tenant_id, intra_extension_id, keystone_id):
- tenants_dict = self.driver.get_tenants_dict()
- if tenant_id not in tenants_dict:
- raise TenantUnknown()
- if intra_extension_id not in (tenants_dict[tenant_id]['intra_authz_extension_id'],
- tenants_dict[tenant_id]['intra_admin_extension_id'], ):
- raise IntraExtensionUnknown()
- # Note (asteroide): We used ADMIN_ID because the user requesting this information may only know his keystone_id
- # and not the subject ID in the requested intra_extension.
- subjects_dict = self.admin_api.get_subjects_dict(ADMIN_ID, intra_extension_id)
- for subject_id in subjects_dict:
- if keystone_id is subjects_dict[subject_id]['keystone_id']:
- return {subject_id: subjects_dict[subject_id]}
-
- @filter_input
- def get_subject_dict_from_keystone_name(self, tenant_id, intra_extension_id, keystone_name):
- tenants_dict = self.driver.get_tenants_dict()
- if tenant_id not in tenants_dict:
- raise TenantUnknown()
- if intra_extension_id not in (tenants_dict[tenant_id]['intra_authz_extension_id'],
- tenants_dict[tenant_id]['intra_admin_extension_id'], ):
- raise IntraExtensionUnknown()
- # Note (asteroide): We used ADMIN_ID because the user requesting this information may only know his
- # keystone_name and not the subject ID in the requested intra_extension.
- subjects_dict = self.admin_api.get_subjects_dict(ADMIN_ID, intra_extension_id)
- for subject_id in subjects_dict:
- if keystone_name is subjects_dict[subject_id]['keystone_name']:
- return {subject_id: subjects_dict[subject_id]}
-
-
-@dependency.requires('identity_api', 'tenant_api', 'configuration_api', 'authz_api', 'admin_api', 'moonlog_api')
+@dependency.requires('identity_api', 'tenant_api', 'configuration_api', 'authz_api', 'admin_api', 'moonlog_api', 'root_api')
class IntraExtensionManager(manager.Manager):
+ driver_namespace = 'keystone.moon.intraextension'
+
def __init__(self):
driver = CONF.moon.intraextension_driver
super(IntraExtensionManager, self).__init__(driver)
@@ -501,6 +504,7 @@ class IntraExtensionManager(manager.Manager):
authz_buffer['subject_assignments'] = dict()
authz_buffer['object_assignments'] = dict()
authz_buffer['action_assignments'] = dict()
+
for _subject_category in meta_data_dict['subject_categories']:
authz_buffer['subject_assignments'][_subject_category] = list(subject_assignment_dict[_subject_category])
for _object_category in meta_data_dict['object_categories']:
@@ -543,6 +547,8 @@ class IntraExtensionManager(manager.Manager):
aggregation_algorithm_id = aggregation_algorithm_dict.keys()[0]
if aggregation_algorithm_dict[aggregation_algorithm_id]['name'] == 'all_true':
decision = all_true(decision_buffer)
+ elif aggregation_algorithm_dict[aggregation_algorithm_id]['name'] == 'one_true':
+ decision = one_true(decision_buffer)
if not decision:
raise AuthzException("{} {}-{}-{}".format(intra_extension_id, subject_id, action_id, object_id))
return decision
@@ -607,7 +613,12 @@ class IntraExtensionManager(manager.Manager):
subject_dict = dict()
# We suppose that all subjects can be mapped to a true user in Keystone
for _subject in json_perimeter['subjects']:
- keystone_user = self.identity_api.get_user_by_name(_subject, "default")
+ try:
+ keystone_user = self.identity_api.get_user_by_name(_subject, "default")
+ except exception.UserNotFound:
+ # TODO (asteroide): must add a configuration option to allow that exception
+ # maybe a debug option for unittest
+ keystone_user = {'id': "", 'name': _subject}
subject_id = uuid4().hex
subject_dict[subject_id] = keystone_user
subject_dict[subject_id]['keystone_id'] = keystone_user["id"]
@@ -774,8 +785,6 @@ class IntraExtensionManager(manager.Manager):
sub_rule_id = self.driver.get_uuid_from_name(intra_extension_dict["id"],
sub_rule_name,
self.driver.SUB_META_RULE)
- # print(sub_rule_name)
- # print(self.get_sub_meta_rule_relations("admin", ie["id"]))
# if sub_rule_name not in self.get_sub_meta_rule_relations("admin", ie["id"])["sub_meta_rule_relations"]:
# raise IntraExtensionException("Bad sub_rule_name name {} in rules".format(sub_rule_name))
rules[sub_rule_id] = list()
@@ -833,6 +842,32 @@ class IntraExtensionManager(manager.Manager):
self.__load_rule_file(ie_dict, template_dir)
return ref
+ def load_root_intra_extension_dict(self, policy_template):
+ # Note (asteroide): Only one root Extension is authorized
+ # and this extension is created at the very beginning of the server
+ # so we don't need to use enforce here
+ for key in self.driver.get_intra_extensions_dict():
+ # Note (asteroide): if there is at least one Intra Extension, it implies that
+ # the Root Intra Extension had already been created...
+ return
+ ie_dict = dict()
+ ie_dict['id'] = uuid4().hex
+ ie_dict["name"] = "policy_root"
+ ie_dict["model"] = filter_input(policy_template)
+ ie_dict["genre"] = "admin"
+ ie_dict["description"] = "policy_root"
+ ref = self.driver.set_intra_extension_dict(ie_dict['id'], ie_dict)
+ self.moonlog_api.debug("Creation of IE: {}".format(ref))
+ # read the template given by "model" and populate default variables
+ template_dir = os.path.join(CONF.moon.policy_directory, ie_dict["model"])
+ self.__load_metadata_file(ie_dict, template_dir)
+ self.__load_perimeter_file(ie_dict, template_dir)
+ self.__load_scope_file(ie_dict, template_dir)
+ self.__load_assignment_file(ie_dict, template_dir)
+ self.__load_metarule_file(ie_dict, template_dir)
+ self.__load_rule_file(ie_dict, template_dir)
+ return ref
+
@enforce("read", "intra_extensions")
def get_intra_extension_dict(self, user_id, intra_extension_id):
"""
@@ -858,7 +893,7 @@ class IntraExtensionManager(manager.Manager):
for rule_id in self.driver.get_rules_dict(intra_extension_id, sub_meta_rule_id):
self.driver.del_rule(intra_extension_id, sub_meta_rule_id, rule_id)
self.driver.del_sub_meta_rule(intra_extension_id, sub_meta_rule_id)
- for aggregation_algorithm_id in self.driver.get_aggregation_algorithms_dict(intra_extension_id):
+ for aggregation_algorithm_id in self.driver.get_aggregation_algorithm_dict(intra_extension_id):
self.driver.del_aggregation_algorithm(intra_extension_id, aggregation_algorithm_id)
for subject_id in self.driver.get_subjects_dict(intra_extension_id):
self.driver.del_subject(intra_extension_id, subject_id)
@@ -1049,6 +1084,7 @@ class IntraExtensionManager(manager.Manager):
def add_subject_dict(self, user_id, intra_extension_id, subject_dict):
subjects_dict = self.driver.get_subjects_dict(intra_extension_id)
for subject_id in subjects_dict:
+ print(subjects_dict[subject_id]["name"], subject_dict['name'])
if subjects_dict[subject_id]["name"] == subject_dict['name']:
raise SubjectNameExisting()
# Next line will raise an error if user is not present in Keystone database
@@ -1091,6 +1127,37 @@ class IntraExtensionManager(manager.Manager):
return self.driver.set_subject_dict(intra_extension_id, subject_dict["id"], subject_dict)
@filter_input
+ def get_subject_dict_from_keystone_id(self, tenant_id, intra_extension_id, keystone_id):
+ tenants_dict = self.tenant_api.driver.get_tenants_dict()
+ if tenant_id not in tenants_dict:
+ raise TenantUnknown()
+ if intra_extension_id not in (tenants_dict[tenant_id]['intra_authz_extension_id'],
+ tenants_dict[tenant_id]['intra_admin_extension_id'], ):
+ raise IntraExtensionUnknown()
+ # Note (asteroide): We used self.root_api.get_root_admin_id() because the user requesting this information
+ # may only know his keystone_id and not the subject ID in the requested intra_extension.
+ subjects_dict = self.get_subjects_dict(self.root_api.get_root_admin_id(), intra_extension_id)
+ for subject_id in subjects_dict:
+ if keystone_id == subjects_dict[subject_id]['keystone_id']:
+ return {subject_id: subjects_dict[subject_id]}
+
+ @filter_input
+ def get_subject_dict_from_keystone_name(self, tenant_id, intra_extension_id, keystone_name):
+ tenants_dict = self.tenant_api.driver.get_tenants_dict()
+ if tenant_id not in tenants_dict:
+ raise TenantUnknown()
+ if intra_extension_id not in (tenants_dict[tenant_id]['intra_authz_extension_id'],
+ tenants_dict[tenant_id]['intra_admin_extension_id'], ):
+ raise IntraExtensionUnknown()
+ # Note (asteroide): We used self.root_api.get_root_admin_id() because the user requesting this information
+ # may only know his keystone_name and not the subject ID in the requested intra_extension.
+ subjects_dict = self.get_subjects_dict(self.root_api.get_root_admin_id(), intra_extension_id)
+ for subject_id in subjects_dict:
+ if keystone_name == subjects_dict[subject_id]['keystone_name']:
+ return {subject_id: subjects_dict[subject_id]}
+
+
+ @filter_input
@enforce("read", "objects")
def get_objects_dict(self, user_id, intra_extension_id):
return self.driver.get_objects_dict(intra_extension_id)
@@ -1539,7 +1606,7 @@ class IntraExtensionManager(manager.Manager):
@enforce(("read", "write"), "aggregation_algorithm")
def set_aggregation_algorithm_dict(self, user_id, intra_extension_id, aggregation_algorithm_id, aggregation_algorithm_dict):
if aggregation_algorithm_id:
- if aggregation_algorithm_id not in self.configuration_api.get_aggregation_algorithms_dict(ADMIN_ID):
+ if aggregation_algorithm_id not in self.configuration_api.get_aggregation_algorithms_dict(self.root_api.get_root_admin_id()):
raise AggregationAlgorithmUnknown()
else:
aggregation_algorithm_id = uuid4().hex
@@ -1577,7 +1644,9 @@ class IntraExtensionManager(manager.Manager):
sub_meta_rule_dict['action_categories'] == sub_meta_rules_dict[_sub_meta_rule_id]["action_categories"] and \
sub_meta_rule_dict['algorithm'] == sub_meta_rules_dict[_sub_meta_rule_id]["algorithm"]:
raise SubMetaRuleExisting()
- if sub_meta_rule_dict['algorithm'] not in self.configuration_api.get_sub_meta_rule_algorithms_dict(user_id):
+ algorithm_names = map(lambda x: x['name'],
+ self.configuration_api.get_sub_meta_rule_algorithms_dict(user_id).values())
+ if sub_meta_rule_dict['algorithm'] not in algorithm_names:
raise SubMetaRuleAlgorithmNotExisting()
sub_meta_rule_id = uuid4().hex
# TODO (dthom): add new sub-meta-rule to rule dict
@@ -1682,10 +1751,10 @@ class IntraExtensionAuthzManager(IntraExtensionManager):
elif genre == "admin":
genre = "intra_admin_extension_id"
- tenants_dict = self.tenant_api.get_tenants_dict(ADMIN_ID)
+ tenants_dict = self.tenant_api.get_tenants_dict(self.root_api.get_root_admin_id())
tenant_id = None
for _tenant_id in tenants_dict:
- if tenants_dict[_tenant_id]["name"] is tenant_name:
+ if tenants_dict[_tenant_id]["name"] == tenant_name:
tenant_id = _tenant_id
break
if not tenant_id:
@@ -1697,8 +1766,9 @@ class IntraExtensionAuthzManager(IntraExtensionManager):
subjects_dict = self.driver.get_subjects_dict(intra_extension_id)
subject_id = None
for _subject_id in subjects_dict:
- if subjects_dict[_subject_id]['keystone_name'] is subject_name:
- subject_id = subjects_dict[_subject_id]['keystone_id']
+ if subjects_dict[_subject_id]['keystone_name'] == subject_name:
+ # subject_id = subjects_dict[_subject_id]['keystone_id']
+ subject_id = _subject_id
break
if not subject_id:
raise SubjectUnknown()
@@ -1725,7 +1795,7 @@ class IntraExtensionAuthzManager(IntraExtensionManager):
def add_subject_dict(self, user_id, intra_extension_id, subject_dict):
subject = super(IntraExtensionAuthzManager, self).add_subject_dict(user_id, intra_extension_id, subject_dict)
subject_id, subject_value = subject.iteritems().next()
- tenants_dict = self.tenant_api.get_tenants_dict(ADMIN_ID)
+ tenants_dict = self.tenant_api.get_tenants_dict(self.root_api.get_root_admin_id())
for tenant_id in tenants_dict:
if tenants_dict[tenant_id]["intra_authz_extension_id"] == intra_extension_id:
_subjects = self.driver.get_subjects_dict(tenants_dict[tenant_id]["intra_admin_extension_id"])
@@ -1742,7 +1812,7 @@ class IntraExtensionAuthzManager(IntraExtensionManager):
def del_subject(self, user_id, intra_extension_id, subject_id):
subject_name = self.driver.get_subjects_dict(intra_extension_id)[subject_id]["name"]
super(IntraExtensionAuthzManager, self).del_subject(user_id, intra_extension_id, subject_id)
- tenants_dict = self.tenant_api.get_tenants_dict(ADMIN_ID)
+ tenants_dict = self.tenant_api.get_tenants_dict(self.root_api.get_root_admin_id())
for tenant_id in tenants_dict:
if tenants_dict[tenant_id]["intra_authz_extension_id"] == intra_extension_id:
subject_id = self.driver.get_uuid_from_name(tenants_dict[tenant_id]["intra_admin_extension_id"],
@@ -1760,7 +1830,7 @@ class IntraExtensionAuthzManager(IntraExtensionManager):
def set_subject_dict(self, user_id, intra_extension_id, subject_id, subject_dict):
subject = super(IntraExtensionAuthzManager, self).set_subject_dict(user_id, intra_extension_id, subject_dict)
subject_id, subject_value = subject.iteritems().next()
- tenants_dict = self.tenant_api.get_tenants_dict(ADMIN_ID)
+ tenants_dict = self.tenant_api.get_tenants_dict(self.root_api.get_root_admin_id())
for tenant_id in tenants_dict:
if tenants_dict[tenant_id]["intra_authz_extension_id"] == intra_extension_id:
self.driver.set_subject_dict(tenants_dict[tenant_id]["intra_admin_extension_id"], uuid4().hex, subject_value)
@@ -1770,110 +1840,110 @@ class IntraExtensionAuthzManager(IntraExtensionManager):
break
return subject
- # def add_subject_category(self, user_id, intra_extension_id, subject_category_dict):
- # raise AuthzException()
- #
- # def del_subject_category(self, user_id, intra_extension_id, subject_category_id):
- # raise AuthzException()
- #
- # def set_subject_category(self, user_id, intra_extension_id, subject_category_id, subject_category_dict):
- # raise AuthzException()
- #
- # def add_object_category(self, user_id, intra_extension_id, object_category_dict):
- # raise AuthzException()
- #
- # def del_object_category(self, user_id, intra_extension_id, object_category_id):
- # raise AuthzException()
- #
- # def add_action_category(self, user_id, intra_extension_id, action_category_name):
- # raise AuthzException()
- #
- # def del_action_category(self, user_id, intra_extension_id, action_category_id):
- # raise AuthzException()
- #
- # def add_object_dict(self, user_id, intra_extension_id, object_name):
- # raise AuthzException()
- #
- # def set_object_dict(self, user_id, intra_extension_id, object_id, object_dict):
- # raise AuthzException()
- #
- # def del_object(self, user_id, intra_extension_id, object_id):
- # raise AuthzException()
- #
- # def add_action_dict(self, user_id, intra_extension_id, action_name):
- # raise AuthzException()
- #
- # def set_action_dict(self, user_id, intra_extension_id, action_id, action_dict):
- # raise AuthzException()
- #
- # def del_action(self, user_id, intra_extension_id, action_id):
- # raise AuthzException()
- #
- # def add_subject_scope_dict(self, user_id, intra_extension_id, subject_category_id, subject_scope_dict):
- # raise AuthzException()
- #
- # def del_subject_scope(self, user_id, intra_extension_id, subject_category_id, subject_scope_id):
- # raise AuthzException()
- #
- # def set_subject_scope_dict(self, user_id, intra_extension_id, subject_category_id, subject_scope_id, subject_scope_name):
- # raise AuthzException()
- #
- # def add_object_scope_dict(self, user_id, intra_extension_id, object_category_id, object_scope_name):
- # raise AuthzException()
- #
- # def del_object_scope(self, user_id, intra_extension_id, object_category_id, object_scope_id):
- # raise AuthzException()
- #
- # def set_object_scope_dict(self, user_id, intra_extension_id, object_category_id, object_scope_id, object_scope_name):
- # raise AuthzException()
- #
- # def add_action_scope_dict(self, user_id, intra_extension_id, action_category_id, action_scope_name):
- # raise AuthzException()
- #
- # def del_action_scope(self, user_id, intra_extension_id, action_category_id, action_scope_id):
- # raise AuthzException()
- #
- # def add_subject_assignment_list(self, user_id, intra_extension_id, subject_id, subject_category_id, subject_scope_id):
- # raise AuthzException()
- #
- # def del_subject_assignment(self, user_id, intra_extension_id, subject_id, subject_category_id, subject_scope_id):
- # raise AuthzException()
- #
- # def add_object_assignment_list(self, user_id, intra_extension_id, object_id, object_category_id, object_scope_id):
- # raise AuthzException()
- #
- # def del_object_assignment(self, user_id, intra_extension_id, object_id, object_category_id, object_scope_id):
- # raise AuthzException()
- #
- # def add_action_assignment_list(self, user_id, intra_extension_id, action_id, action_category_id, action_scope_id):
- # raise AuthzException()
- #
- # def del_action_assignment(self, user_id, intra_extension_id, action_id, action_category_id, action_scope_id):
- # raise AuthzException()
- #
- # def set_aggregation_algorithm_dict(self, user_id, intra_extension_id, aggregation_algorithm_id, aggregation_algorithm_dict):
- # raise AuthzException()
- #
- # def del_aggregation_algorithm_dict(self, user_id, intra_extension_id, aggregation_algorithm_id):
- # raise AuthzException()
- #
- # def add_sub_meta_rule_dict(self, user_id, intra_extension_id, sub_meta_rule_dict):
- # raise AuthzException()
- #
- # def del_sub_meta_rule(self, user_id, intra_extension_id, sub_meta_rule_id):
- # raise AuthzException()
- #
- # def set_sub_meta_rule_dict(self, user_id, intra_extension_id, sub_meta_rule_id, sub_meta_rule_dict):
- # raise AuthzException()
- #
- # def add_rule_dict(self, user_id, intra_extension_id, sub_meta_rule_id, rule_list):
- # raise AuthzException()
- #
- # def del_rule(self, user_id, intra_extension_id, sub_meta_rule_id, rule_id):
- # raise AuthzException()
- #
- # def set_rule_dict(self, user_id, intra_extension_id, sub_meta_rule_id, rule_id, rule_list):
- # raise AuthzException()
+ def add_subject_category(self, user_id, intra_extension_id, subject_category_dict):
+ raise AuthzException()
+
+ def del_subject_category(self, user_id, intra_extension_id, subject_category_id):
+ raise AuthzException()
+
+ def set_subject_category(self, user_id, intra_extension_id, subject_category_id, subject_category_dict):
+ raise AuthzException()
+
+ def add_object_category(self, user_id, intra_extension_id, object_category_dict):
+ raise AuthzException()
+
+ def del_object_category(self, user_id, intra_extension_id, object_category_id):
+ raise AuthzException()
+
+ def add_action_category(self, user_id, intra_extension_id, action_category_name):
+ raise AuthzException()
+
+ def del_action_category(self, user_id, intra_extension_id, action_category_id):
+ raise AuthzException()
+
+ def add_object_dict(self, user_id, intra_extension_id, object_name):
+ raise AuthzException()
+
+ def set_object_dict(self, user_id, intra_extension_id, object_id, object_dict):
+ raise AuthzException()
+
+ def del_object(self, user_id, intra_extension_id, object_id):
+ raise AuthzException()
+
+ def add_action_dict(self, user_id, intra_extension_id, action_name):
+ raise AuthzException()
+
+ def set_action_dict(self, user_id, intra_extension_id, action_id, action_dict):
+ raise AuthzException()
+
+ def del_action(self, user_id, intra_extension_id, action_id):
+ raise AuthzException()
+
+ def add_subject_scope_dict(self, user_id, intra_extension_id, subject_category_id, subject_scope_dict):
+ raise AuthzException()
+
+ def del_subject_scope(self, user_id, intra_extension_id, subject_category_id, subject_scope_id):
+ raise AuthzException()
+
+ def set_subject_scope_dict(self, user_id, intra_extension_id, subject_category_id, subject_scope_id, subject_scope_name):
+ raise AuthzException()
+
+ def add_object_scope_dict(self, user_id, intra_extension_id, object_category_id, object_scope_name):
+ raise AuthzException()
+
+ def del_object_scope(self, user_id, intra_extension_id, object_category_id, object_scope_id):
+ raise AuthzException()
+
+ def set_object_scope_dict(self, user_id, intra_extension_id, object_category_id, object_scope_id, object_scope_name):
+ raise AuthzException()
+
+ def add_action_scope_dict(self, user_id, intra_extension_id, action_category_id, action_scope_name):
+ raise AuthzException()
+
+ def del_action_scope(self, user_id, intra_extension_id, action_category_id, action_scope_id):
+ raise AuthzException()
+
+ def add_subject_assignment_list(self, user_id, intra_extension_id, subject_id, subject_category_id, subject_scope_id):
+ raise AuthzException()
+
+ def del_subject_assignment(self, user_id, intra_extension_id, subject_id, subject_category_id, subject_scope_id):
+ raise AuthzException()
+
+ def add_object_assignment_list(self, user_id, intra_extension_id, object_id, object_category_id, object_scope_id):
+ raise AuthzException()
+
+ def del_object_assignment(self, user_id, intra_extension_id, object_id, object_category_id, object_scope_id):
+ raise AuthzException()
+
+ def add_action_assignment_list(self, user_id, intra_extension_id, action_id, action_category_id, action_scope_id):
+ raise AuthzException()
+
+ def del_action_assignment(self, user_id, intra_extension_id, action_id, action_category_id, action_scope_id):
+ raise AuthzException()
+
+ def set_aggregation_algorithm_dict(self, user_id, intra_extension_id, aggregation_algorithm_id, aggregation_algorithm_dict):
+ raise AuthzException()
+
+ def del_aggregation_algorithm_dict(self, user_id, intra_extension_id, aggregation_algorithm_id):
+ raise AuthzException()
+
+ def add_sub_meta_rule_dict(self, user_id, intra_extension_id, sub_meta_rule_dict):
+ raise AuthzException()
+
+ def del_sub_meta_rule(self, user_id, intra_extension_id, sub_meta_rule_id):
+ raise AuthzException()
+
+ def set_sub_meta_rule_dict(self, user_id, intra_extension_id, sub_meta_rule_id, sub_meta_rule_dict):
+ raise AuthzException()
+
+ def add_rule_dict(self, user_id, intra_extension_id, sub_meta_rule_id, rule_list):
+ raise AuthzException()
+
+ def del_rule(self, user_id, intra_extension_id, sub_meta_rule_id, rule_id):
+ raise AuthzException()
+
+ def set_rule_dict(self, user_id, intra_extension_id, sub_meta_rule_id, rule_id, rule_list):
+ raise AuthzException()
@dependency.provider('admin_api')
@@ -1885,7 +1955,7 @@ class IntraExtensionAdminManager(IntraExtensionManager):
def add_subject_dict(self, user_id, intra_extension_id, subject_dict):
subject = super(IntraExtensionAdminManager, self).add_subject_dict(user_id, intra_extension_id, subject_dict)
subject_id, subject_value = subject.iteritems().next()
- tenants_dict = self.tenant_api.get_tenants_dict(ADMIN_ID)
+ tenants_dict = self.tenant_api.get_tenants_dict(self.root_api.get_root_admin_id())
for tenant_id in tenants_dict:
if tenants_dict[tenant_id]["intra_authz_extension_id"] == intra_extension_id:
_subjects = self.driver.get_subjects_dict(tenants_dict[tenant_id]["intra_admin_extension_id"])
@@ -1902,7 +1972,7 @@ class IntraExtensionAdminManager(IntraExtensionManager):
def del_subject(self, user_id, intra_extension_id, subject_id):
subject_name = self.driver.get_subjects_dict(intra_extension_id)[subject_id]["name"]
super(IntraExtensionAdminManager, self).del_subject(user_id, intra_extension_id, subject_id)
- tenants_dict = self.tenant_api.get_tenants_dict(ADMIN_ID)
+ tenants_dict = self.tenant_api.get_tenants_dict(self.root_api.get_root_admin_id())
for tenant_id in tenants_dict:
if tenants_dict[tenant_id]["intra_authz_extension_id"] == intra_extension_id:
subject_id = self.driver.get_uuid_from_name(tenants_dict[tenant_id]["intra_admin_extension_id"],
@@ -1920,7 +1990,7 @@ class IntraExtensionAdminManager(IntraExtensionManager):
def set_subject_dict(self, user_id, intra_extension_id, subject_id, subject_dict):
subject = super(IntraExtensionAdminManager, self).set_subject_dict(user_id, intra_extension_id, subject_dict)
subject_id, subject_value = subject.iteritems().next()
- tenants_dict = self.tenant_api.get_tenants_dict(ADMIN_ID)
+ tenants_dict = self.tenant_api.get_tenants_dict(self.root_api.get_root_admin_id())
for tenant_id in tenants_dict:
if tenants_dict[tenant_id]["intra_authz_extension_id"] == intra_extension_id:
self.driver.set_subject_dict(tenants_dict[tenant_id]["intra_admin_extension_id"], uuid4().hex, subject_value)
@@ -1931,29 +2001,78 @@ class IntraExtensionAdminManager(IntraExtensionManager):
return subject
def add_object_dict(self, user_id, intra_extension_id, object_name):
- raise ObjectsWriteNoAuthorized()
+ if "admin" == self.get_intra_extension_dict(self.root_api.get_root_admin_id(), intra_extension_id)['genre']:
+ raise ObjectsWriteNoAuthorized()
+ return super(IntraExtensionAdminManager, self).add_object_dict(user_id, intra_extension_id, object_name)
def set_object_dict(self, user_id, intra_extension_id, object_id, object_dict):
- raise ObjectsWriteNoAuthorized()
+ if "admin" == self.get_intra_extension_dict(self.root_api.get_root_admin_id(), intra_extension_id)['genre']:
+ raise ObjectsWriteNoAuthorized()
+ return super(IntraExtensionAdminManager, self).set_object_dict(user_id, intra_extension_id, object_id, object_dict)
def del_object(self, user_id, intra_extension_id, object_id):
- raise ObjectsWriteNoAuthorized()
+ if "admin" == self.get_intra_extension_dict(self.root_api.get_root_admin_id(), intra_extension_id)['genre']:
+ raise ObjectsWriteNoAuthorized()
+ return super(IntraExtensionAdminManager, self).del_object(user_id, intra_extension_id, object_id)
def add_action_dict(self, user_id, intra_extension_id, action_name):
- raise ActionsWriteNoAuthorized()
+ if "admin" == self.get_intra_extension_dict(self.root_api.get_root_admin_id(), intra_extension_id)['genre']:
+ raise ActionsWriteNoAuthorized()
+ return super(IntraExtensionAdminManager, self).add_action_dict(user_id, intra_extension_id, action_name)
def set_action_dict(self, user_id, intra_extension_id, action_id, action_dict):
- raise ActionsWriteNoAuthorized()
+ if "admin" == self.get_intra_extension_dict(self.root_api.get_root_admin_id(), intra_extension_id)['genre']:
+ raise ActionsWriteNoAuthorized()
+ return super(IntraExtensionAdminManager, self).set_action_dict(user_id, intra_extension_id, action_id, action_dict)
def del_action(self, user_id, intra_extension_id, action_id):
- raise ActionsWriteNoAuthorized()
+ if "admin" == self.get_intra_extension_dict(self.root_api.get_root_admin_id(), intra_extension_id)['genre']:
+ raise ActionsWriteNoAuthorized()
+ return super(IntraExtensionAdminManager, self).del_action(user_id, intra_extension_id, action_id)
+
+
+@dependency.provider('root_api')
+@dependency.requires('moonlog_api', 'admin_api', 'tenant_api')
+class IntraExtensionRootManager(IntraExtensionManager):
+
+ def __init__(self):
+ super(IntraExtensionRootManager, self).__init__()
+ extensions = self.admin_api.driver.get_intra_extensions_dict()
+ for extension_id, extension_dict in extensions.iteritems():
+ if extension_dict["model"] == CONF.moon.root_policy_directory:
+ self.root_extension_id = extension_id
+ else:
+ extension = self.admin_api.load_root_intra_extension_dict(CONF.moon.root_policy_directory)
+ self.root_extension_id = extension['id']
+ self.root_admin_id = self.__compute_admin_id_for_root_extension()
+
+ def get_root_extension_dict(self):
+ """
+
+ :return: {id: {"name": "xxx"}}
+ """
+ return {self.root_extension_id: self.admin_api.driver.get_intra_extensions_dict()[self.root_extension_id]}
+
+ def __compute_admin_id_for_root_extension(self):
+ for subject_id, subject_dict in self.admin_api.driver.get_subjects_dict(self.root_extension_id).iteritems():
+ if subject_dict["name"] == "admin":
+ return subject_id
+ raise RootExtensionNotInitialized()
+
+ def get_root_extension_id(self):
+ return self.root_extension_id
+
+ def get_root_admin_id(self):
+ return self.root_admin_id
@dependency.provider('moonlog_api')
# Next line is mandatory in order to force keystone to process dependencies.
-@dependency.requires('identity_api', 'tenant_api', 'configuration_api', 'authz_api', 'admin_api')
+@dependency.requires('identity_api', 'tenant_api', 'configuration_api', 'authz_api', 'admin_api', 'root_api')
class LogManager(manager.Manager):
+ driver_namespace = 'keystone.moon.log'
+
def __init__(self):
driver = CONF.moon.log_driver
super(LogManager, self).__init__(driver)
diff --git a/keystone-moon/keystone/contrib/moon/extension.py b/keystone-moon/keystone/contrib/moon/extension.py
deleted file mode 100644
index efee55c5..00000000
--- a/keystone-moon/keystone/contrib/moon/extension.py
+++ /dev/null
@@ -1,740 +0,0 @@
-# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
-# This software is distributed under the terms and conditions of the 'Apache-2.0'
-# license which can be found in the file 'LICENSE' in this package distribution
-# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
-
-import os.path
-import copy
-import json
-import itertools
-from uuid import uuid4
-import logging
-
-LOG = logging.getLogger("moon.authz")
-
-
-class Metadata:
-
- def __init__(self):
- self.__name = ''
- self.__model = ''
- self.__genre = ''
- self.__description = ''
- self.__subject_categories = list()
- self.__object_categories = list()
- self.__meta_rule = dict()
- self.__meta_rule['sub_meta_rules'] = list()
- self.__meta_rule['aggregation'] = ''
-
- def load_from_json(self, extension_setting_dir):
- metadata_path = os.path.join(extension_setting_dir, 'metadata.json')
- f = open(metadata_path)
- json_metadata = json.load(f)
- self.__name = json_metadata['name']
- self.__model = json_metadata['model']
- self.__genre = json_metadata['genre']
- self.__description = json_metadata['description']
- self.__subject_categories = copy.deepcopy(json_metadata['subject_categories'])
- self.__object_categories = copy.deepcopy(json_metadata['object_categories'])
- self.__meta_rule = copy.deepcopy(json_metadata['meta_rule'])
-
- def get_name(self):
- return self.__name
-
- def get_genre(self):
- return self.__genre
-
- def get_model(self):
- return self.__model
-
- def get_subject_categories(self):
- return self.__subject_categories
-
- def get_object_categories(self):
- return self.__object_categories
-
- def get_meta_rule(self):
- return self.__meta_rule
-
- def get_meta_rule_aggregation(self):
- return self.__meta_rule['aggregation']
-
- def get_data(self):
- data = dict()
- data["name"] = self.get_name()
- data["model"] = self.__model
- data["genre"] = self.__genre
- data["description"] = self.__description
- data["subject_categories"] = self.get_subject_categories()
- data["object_categories"] = self.get_object_categories()
- data["meta_rule"] = dict(self.get_meta_rule())
- return data
-
- def set_data(self, data):
- self.__name = data["name"]
- self.__model = data["model"]
- self.__genre = data["genre"]
- self.__description = data["description"]
- self.__subject_categories = list(data["subject_categories"])
- self.__object_categories = list(data["object_categories"])
- self.__meta_rule = dict(data["meta_rule"])
-
-
-class Configuration:
- def __init__(self):
- self.__subject_category_values = dict()
- # examples: { "role": {"admin", "dev", }, }
- self.__object_category_values = dict()
- self.__rules = list()
-
- def load_from_json(self, extension_setting_dir):
- configuration_path = os.path.join(extension_setting_dir, 'configuration.json')
- f = open(configuration_path)
- json_configuration = json.load(f)
- self.__subject_category_values = copy.deepcopy(json_configuration['subject_category_values'])
- self.__object_category_values = copy.deepcopy(json_configuration['object_category_values'])
- self.__rules = copy.deepcopy(json_configuration['rules']) # TODO: currently a list, will be a dict with sub-meta-rule as key
-
- def get_subject_category_values(self):
- return self.__subject_category_values
-
- def get_object_category_values(self):
- return self.__object_category_values
-
- def get_rules(self):
- return self.__rules
-
- def get_data(self):
- data = dict()
- data["subject_category_values"] = self.get_subject_category_values()
- data["object_category_values"] = self.get_object_category_values()
- data["rules"] = self.get_rules()
- return data
-
- def set_data(self, data):
- self.__subject_category_values = list(data["subject_category_values"])
- self.__object_category_values = list(data["object_category_values"])
- self.__rules = list(data["rules"])
-
-
-class Perimeter:
- def __init__(self):
- self.__subjects = list()
- self.__objects = list()
-
- def load_from_json(self, extension_setting_dir):
- perimeter_path = os.path.join(extension_setting_dir, 'perimeter.json')
- f = open(perimeter_path)
- json_perimeter = json.load(f)
- self.__subjects = copy.deepcopy(json_perimeter['subjects'])
- self.__objects = copy.deepcopy(json_perimeter['objects'])
- # print(self.__subjects)
- # print(self.__objects)
-
- def get_subjects(self):
- return self.__subjects
-
- def get_objects(self):
- return self.__objects
-
- def get_data(self):
- data = dict()
- data["subjects"] = self.get_subjects()
- data["objects"] = self.get_objects()
- return data
-
- def set_data(self, data):
- self.__subjects = list(data["subjects"])
- self.__objects = list(data["objects"])
-
-
-class Assignment:
- def __init__(self):
- self.__subject_category_assignments = dict()
- # examples: { "role": {"user1": {"dev"}, "user2": {"admin",}}, } TODO: limit to one value for each attr
- self.__object_category_assignments = dict()
-
- def load_from_json(self, extension_setting_dir):
- assignment_path = os.path.join(extension_setting_dir, 'assignment.json')
- f = open(assignment_path)
- json_assignment = json.load(f)
-
- self.__subject_category_assignments = dict(copy.deepcopy(json_assignment['subject_category_assignments']))
- self.__object_category_assignments = dict(copy.deepcopy(json_assignment['object_category_assignments']))
-
- def get_subject_category_assignments(self):
- return self.__subject_category_assignments
-
- def get_object_category_assignments(self):
- return self.__object_category_assignments
-
- def get_data(self):
- data = dict()
- data["subject_category_assignments"] = self.get_subject_category_assignments()
- data["object_category_assignments"] = self.get_object_category_assignments()
- return data
-
- def set_data(self, data):
- self.__subject_category_assignments = list(data["subject_category_assignments"])
- self.__object_category_assignments = list(data["object_category_assignments"])
-
-
-class AuthzData:
- def __init__(self, sub, obj, act):
- self.validation = "False" # "OK, KO, Out of Scope" # "auth": False,
- self.subject = sub
- self.object = str(obj)
- self.action = str(act)
- self.type = "" # intra-tenant, inter-tenant, Out of Scope
- self.subject_attrs = dict()
- self.object_attrs = dict()
- self.requesting_tenant = "" # "subject_tenant": subject_tenant,
- self.requested_tenant = "" # "object_tenant": object_tenant,
-
- def __str__(self):
- return """AuthzData:
- validation={}
- subject={}
- object={}
- action={}
- """.format(self.validation, self.subject, self.object, self.action)
-
-
-class Extension:
- def __init__(self):
- self.metadata = Metadata()
- self.configuration = Configuration()
- self.perimeter = Perimeter()
- self.assignment = Assignment()
-
- def load_from_json(self, extension_setting_dir):
- self.metadata.load_from_json(extension_setting_dir)
- self.configuration.load_from_json(extension_setting_dir)
- self.perimeter.load_from_json(extension_setting_dir)
- self.assignment.load_from_json(extension_setting_dir)
-
- def get_name(self):
- return self.metadata.get_name()
-
- def get_genre(self):
- return self.metadata.get_genre()
-
- def authz(self, sub, obj, act):
- authz_data = AuthzData(sub, obj, act)
- # authz_logger.warning('extension/authz request: [sub {}, obj {}, act {}]'.format(sub, obj, act))
-
- if authz_data.subject in self.perimeter.get_subjects() and authz_data.object in self.perimeter.get_objects():
-
- for subject_category in self.metadata.get_subject_categories():
- authz_data.subject_attrs[subject_category] = copy.copy(
- # self.assignment.get_subject_category_attr(subject_category, sub)
- self.assignment.get_subject_category_assignments()[subject_category][sub]
- )
- # authz_logger.warning('extension/authz subject attribute: [subject attr: {}]'.format(
- # #self.assignment.get_subject_category_attr(subject_category, sub))
- # self.assignment.get_subject_category_assignments()[subject_category][sub])
- # )
-
- for object_category in self.metadata.get_object_categories():
- if object_category == 'action':
- authz_data.object_attrs[object_category] = [act]
- # authz_logger.warning('extension/authz object attribute: [object attr: {}]'.format([act]))
- else:
- authz_data.object_attrs[object_category] = copy.copy(
- self.assignment.get_object_category_assignments()[object_category][obj]
- )
- # authz_logger.warning('extension/authz object attribute: [object attr: {}]'.format(
- # self.assignment.get_object_category_assignments()[object_category][obj])
- # )
-
- _aggregation_data = dict()
-
- for sub_meta_rule in self.metadata.get_meta_rule()["sub_meta_rules"].values():
- _tmp_relation_args = list()
-
- for sub_subject_category in sub_meta_rule["subject_categories"]:
- _tmp_relation_args.append(authz_data.subject_attrs[sub_subject_category])
-
- for sub_object_category in sub_meta_rule["object_categories"]:
- _tmp_relation_args.append(authz_data.object_attrs[sub_object_category])
-
- _relation_args = list(itertools.product(*_tmp_relation_args))
-
- if sub_meta_rule['relation'] == 'relation_super': # TODO: replace by Prolog Engine
- _aggregation_data['relation_super'] = dict()
- _aggregation_data['relation_super']['result'] = False
- for _relation_arg in _relation_args:
- if list(_relation_arg) in self.configuration.get_rules()[sub_meta_rule['relation']]:
- # authz_logger.warning(
- # 'extension/authz relation super OK: [sub_sl: {}, obj_sl: {}, action: {}]'.format(
- # _relation_arg[0], _relation_arg[1], _relation_arg[2]
- # )
- # )
- _aggregation_data['relation_super']['result'] = True
- break
- _aggregation_data['relation_super']['status'] = 'finished'
-
- elif sub_meta_rule['relation'] == 'permission':
- _aggregation_data['permission'] = dict()
- _aggregation_data['permission']['result'] = False
- for _relation_arg in _relation_args:
- if list(_relation_arg) in self.configuration.get_rules()[sub_meta_rule['relation']]:
- # authz_logger.warning(
- # 'extension/authz relation permission OK: [role: {}, object: {}, action: {}]'.format(
- # _relation_arg[0], _relation_arg[1], _relation_arg[2]
- # )
- # )
- _aggregation_data['permission']['result'] = True
- break
- _aggregation_data['permission']['status'] = 'finished'
-
- if self.metadata.get_meta_rule_aggregation() == 'and_true_aggregation':
- authz_data.validation = "OK"
- for relation in _aggregation_data:
- if _aggregation_data[relation]['status'] == 'finished' \
- and _aggregation_data[relation]['result'] == False:
- authz_data.validation = "KO"
- else:
- authz_data.validation = 'Out of Scope'
-
- return authz_data.validation
-
- # ---------------- metadate api ----------------
-
- def get_subject_categories(self):
- return self.metadata.get_subject_categories()
-
- def add_subject_category(self, category_id):
- if category_id in self.get_subject_categories():
- return "[ERROR] Add Subject Category: Subject Category Exists"
- else:
- self.get_subject_categories().append(category_id)
- self.configuration.get_subject_category_values()[category_id] = list()
- self.assignment.get_subject_category_assignments()[category_id] = dict()
- return self.get_subject_categories()
-
- def del_subject_category(self, category_id):
- if category_id in self.get_subject_categories():
- self.configuration.get_subject_category_values().pop(category_id)
- self.assignment.get_subject_category_assignments().pop(category_id)
- self.get_subject_categories().remove(category_id)
- return self.get_subject_categories()
- else:
- return "[ERROR] Del Subject Category: Subject Category Unknown"
-
- def get_object_categories(self):
- return self.metadata.get_object_categories()
-
- def add_object_category(self, category_id):
- if category_id in self.get_object_categories():
- return "[ERROR] Add Object Category: Object Category Exists"
- else:
- self.get_object_categories().append(category_id)
- self.configuration.get_object_category_values()[category_id] = list()
- self.assignment.get_object_category_assignments()[category_id] = dict()
- return self.get_object_categories()
-
- def del_object_category(self, category_id):
- if category_id in self.get_object_categories():
- self.configuration.get_object_category_values().pop(category_id)
- self.assignment.get_object_category_assignments().pop(category_id)
- self.get_object_categories().remove(category_id)
- return self.get_object_categories()
- else:
- return "[ERROR] Del Object Category: Object Category Unknown"
-
- def get_meta_rule(self):
- return self.metadata.get_meta_rule()
-
- # ---------------- configuration api ----------------
-
- def get_subject_category_values(self, category_id):
- return self.configuration.get_subject_category_values()[category_id]
-
- def add_subject_category_value(self, category_id, category_value):
- if category_value in self.configuration.get_subject_category_values()[category_id]:
- return "[ERROR] Add Subject Category Value: Subject Category Value Exists"
- else:
- self.configuration.get_subject_category_values()[category_id].append(category_value)
- return self.configuration.get_subject_category_values()[category_id]
-
- def del_subject_category_value(self, category_id, category_value):
- if category_value in self.configuration.get_subject_category_values()[category_id]:
- self.configuration.get_subject_category_values()[category_id].remove(category_value)
- return self.configuration.get_subject_category_values()[category_id]
- else:
- return "[ERROR] Del Subject Category Value: Subject Category Value Unknown"
-
- def get_object_category_values(self, category_id):
- return self.configuration.get_object_category_values()[category_id]
-
- def add_object_category_value(self, category_id, category_value):
- if category_value in self.configuration.get_object_category_values()[category_id]:
- return "[ERROR] Add Object Category Value: Object Category Value Exists"
- else:
- self.configuration.get_object_category_values()[category_id].append(category_value)
- return self.configuration.get_object_category_values()[category_id]
-
- def del_object_category_value(self, category_id, category_value):
- if category_value in self.configuration.get_object_category_values()[category_id]:
- self.configuration.get_object_category_values()[category_id].remove(category_value)
- return self.configuration.get_object_category_values()[category_id]
- else:
- return "[ERROR] Del Object Category Value: Object Category Value Unknown"
-
- def get_meta_rules(self):
- return self.metadata.get_meta_rule()
-
- def _build_rule_from_list(self, relation, rule):
- rule = list(rule)
- _rule = dict()
- _rule["sub_cat_value"] = dict()
- _rule["obj_cat_value"] = dict()
- if relation in self.metadata.get_meta_rule()["sub_meta_rules"]:
- _rule["sub_cat_value"][relation] = dict()
- _rule["obj_cat_value"][relation] = dict()
- for s_category in self.metadata.get_meta_rule()["sub_meta_rules"][relation]["subject_categories"]:
- _rule["sub_cat_value"][relation][s_category] = rule.pop(0)
- for o_category in self.metadata.get_meta_rule()["sub_meta_rules"][relation]["object_categories"]:
- _rule["obj_cat_value"][relation][o_category] = rule.pop(0)
- return _rule
-
- def get_rules(self, full=False):
- if not full:
- return self.configuration.get_rules()
- rules = dict()
- for key in self.configuration.get_rules():
- rules[key] = map(lambda x: self._build_rule_from_list(key, x), self.configuration.get_rules()[key])
- return rules
-
- def add_rule(self, sub_cat_value_dict, obj_cat_value_dict):
- for _relation in self.metadata.get_meta_rule()["sub_meta_rules"]:
- _sub_rule = list()
- for sub_subject_category in self.metadata.get_meta_rule()["sub_meta_rules"][_relation]["subject_categories"]:
- try:
- if sub_cat_value_dict[_relation][sub_subject_category] \
- in self.configuration.get_subject_category_values()[sub_subject_category]:
- _sub_rule.append(sub_cat_value_dict[_relation][sub_subject_category])
- else:
- return "[Error] Add Rule: Subject Category Value Unknown"
- except KeyError as e:
- # DThom: sometimes relation attribute is buggy, I don't know why...
- print(e)
-
- #BUG: when adding a new category in rules despite it was previously adding
- # data = {
- # "sub_cat_value":
- # {"relation_super":
- # {"subject_security_level": "high", "AMH_CAT": "AMH_VAL"}
- # },
- # "obj_cat_value":
- # {"relation_super":
- # {"object_security_level": "medium"}
- # }
- # }
- # traceback = """
- # Traceback (most recent call last):
- # File "/moon/gui/views_json.py", line 20, in wrapped
- # result = function(*args, **kwargs)
- # File "/moon/gui/views_json.py", line 429, in rules
- # obj_cat_value=filter_input(data["obj_cat_value"]))
- # File "/usr/local/lib/python2.7/dist-packages/moon/core/pap/core.py", line 380, in add_rule
- # obj_cat_value)
- # File "/usr/local/lib/python2.7/dist-packages/moon/core/pdp/extension.py", line 414, in add_rule
- # if obj_cat_value_dict[_relation][sub_object_category] \
- # KeyError: u'action'
- # """
- for sub_object_category in self.metadata.get_meta_rule()["sub_meta_rules"][_relation]["object_categories"]:
- if obj_cat_value_dict[_relation][sub_object_category] \
- in self.configuration.get_object_category_values()[sub_object_category]:
- _sub_rule.append(obj_cat_value_dict[_relation][sub_object_category])
- else:
- return "[Error] Add Rule: Object Category Value Unknown"
-
- if _sub_rule in self.configuration.get_rules()[_relation]:
- return "[Error] Add Rule: Rule Exists"
- else:
- self.configuration.get_rules()[_relation].append(_sub_rule)
- return {
- sub_cat_value_dict.keys()[0]: ({
- "sub_cat_value": copy.deepcopy(sub_cat_value_dict),
- "obj_cat_value": copy.deepcopy(obj_cat_value_dict)
- }, )
- }
- return self.configuration.get_rules()
-
- def del_rule(self, sub_cat_value_dict, obj_cat_value_dict):
- for _relation in self.metadata.get_meta_rule()["sub_meta_rules"]:
- _sub_rule = list()
- for sub_subject_category in self.metadata.get_meta_rule()["sub_meta_rules"][_relation]["subject_categories"]:
- _sub_rule.append(sub_cat_value_dict[_relation][sub_subject_category])
-
- for sub_object_category in self.metadata.get_meta_rule()["sub_meta_rules"][_relation]["object_categories"]:
- _sub_rule.append(obj_cat_value_dict[_relation][sub_object_category])
-
- if _sub_rule in self.configuration.get_rules()[_relation]:
- self.configuration.get_rules()[_relation].remove(_sub_rule)
- else:
- return "[Error] Del Rule: Rule Unknown"
- return self.configuration.get_rules()
-
- # ---------------- perimeter api ----------------
-
- def get_subjects(self):
- return self.perimeter.get_subjects()
-
- def get_objects(self):
- return self.perimeter.get_objects()
-
- def add_subject(self, subject_id):
- if subject_id in self.perimeter.get_subjects():
- return "[ERROR] Add Subject: Subject Exists"
- else:
- self.perimeter.get_subjects().append(subject_id)
- return self.perimeter.get_subjects()
-
- def del_subject(self, subject_id):
- if subject_id in self.perimeter.get_subjects():
- self.perimeter.get_subjects().remove(subject_id)
- return self.perimeter.get_subjects()
- else:
- return "[ERROR] Del Subject: Subject Unknown"
-
- def add_object(self, object_id):
- if object_id in self.perimeter.get_objects():
- return "[ERROR] Add Object: Object Exists"
- else:
- self.perimeter.get_objects().append(object_id)
- return self.perimeter.get_objects()
-
- def del_object(self, object_id):
- if object_id in self.perimeter.get_objects():
- self.perimeter.get_objects().remove(object_id)
- return self.perimeter.get_objects()
- else:
- return "[ERROR] Del Object: Object Unknown"
-
- # ---------------- assignment api ----------------
-
- def get_subject_assignments(self, category_id):
- if category_id in self.metadata.get_subject_categories():
- return self.assignment.get_subject_category_assignments()[category_id]
- else:
- return "[ERROR] Get Subject Assignment: Subject Category Unknown"
-
- def add_subject_assignment(self, category_id, subject_id, category_value):
- if category_id in self.metadata.get_subject_categories():
- if subject_id in self.perimeter.get_subjects():
- if category_value in self.configuration.get_subject_category_values()[category_id]:
- if category_id in self.assignment.get_subject_category_assignments().keys():
- if subject_id in self.assignment.get_subject_category_assignments()[category_id].keys():
- if category_value in self.assignment.get_subject_category_assignments()[category_id][subject_id]:
- return "[ERROR] Add Subject Assignment: Subject Assignment Exists"
- else:
- self.assignment.get_subject_category_assignments()[category_id][subject_id].extend([category_value])
- else:
- self.assignment.get_subject_category_assignments()[category_id][subject_id] = [category_value]
- else:
- self.assignment.get_subject_category_assignments()[category_id] = {subject_id: [category_value]}
- return self.assignment.get_subject_category_assignments()
- else:
- return "[ERROR] Add Subject Assignment: Subject Category Value Unknown"
- else:
- return "[ERROR] Add Subject Assignment: Subject Unknown"
- else:
- return "[ERROR] Add Subject Assignment: Subject Category Unknown"
-
- def del_subject_assignment(self, category_id, subject_id, category_value):
- if category_id in self.metadata.get_subject_categories():
- if subject_id in self.perimeter.get_subjects():
- if category_value in self.configuration.get_subject_category_values()[category_id]:
- if len(self.assignment.get_subject_category_assignments()[category_id][subject_id]) >= 2:
- self.assignment.get_subject_category_assignments()[category_id][subject_id].remove(category_value)
- else:
- self.assignment.get_subject_category_assignments()[category_id].pop(subject_id)
- return self.assignment.get_subject_category_assignments()
- else:
- return "[ERROR] Del Subject Assignment: Assignment Unknown"
- else:
- return "[ERROR] Del Subject Assignment: Subject Unknown"
- else:
- return "[ERROR] Del Subject Assignment: Subject Category Unknown"
-
- def get_object_assignments(self, category_id):
- if category_id in self.metadata.get_object_categories():
- return self.assignment.get_object_category_assignments()[category_id]
- else:
- return "[ERROR] Get Object Assignment: Object Category Unknown"
-
- def add_object_assignment(self, category_id, object_id, category_value):
- if category_id in self.metadata.get_object_categories():
- if object_id in self.perimeter.get_objects():
- if category_value in self.configuration.get_object_category_values()[category_id]:
- if category_id in self.assignment.get_object_category_assignments().keys():
- if object_id in self.assignment.get_object_category_assignments()[category_id].keys():
- if category_value in self.assignment.get_object_category_assignments()[category_id][object_id]:
- return "[ERROR] Add Object Assignment: Object Assignment Exists"
- else:
- self.assignment.get_object_category_assignments()[category_id][object_id].extend([category_value])
- else:
- self.assignment.get_object_category_assignments()[category_id][object_id] = [category_value]
- else:
- self.assignment.get_object_category_assignments()[category_id] = {object_id: [category_value]}
- return self.assignment.get_object_category_assignments()
- else:
- return "[ERROR] Add Object Assignment: Object Category Value Unknown"
- else:
- return "[ERROR] Add Object Assignment: Object Unknown"
- else:
- return "[ERROR] Add Object Assignment: Object Category Unknown"
-
- def del_object_assignment(self, category_id, object_id, category_value):
- if category_id in self.metadata.get_object_categories():
- if object_id in self.perimeter.get_objects():
- if category_value in self.configuration.get_object_category_values()[category_id]:
- if len(self.assignment.get_object_category_assignments()[category_id][object_id]) >= 2:
- self.assignment.get_object_category_assignments()[category_id][object_id].remove(category_value)
- else:
- self.assignment.get_object_category_assignments()[category_id].pop(object_id)
- return self.assignment.get_object_category_assignments()
- else:
- return "[ERROR] Del Object Assignment: Assignment Unknown"
- else:
- return "[ERROR] Del Object Assignment: Object Unknown"
- else:
- return "[ERROR] Del Object Assignment: Object Category Unknown"
-
- # ---------------- inter-extension API ----------------
-
- def create_requesting_collaboration(self, sub_list, vent_uuid, act):
- _sub_cat_values = dict()
- _obj_cat_values = dict()
-
- if type(self.add_object(vent_uuid)) is not list:
- return "[Error] Create Requesting Collaboration: No Success"
- for _relation in self.get_meta_rule()["sub_meta_rules"]:
- for _sub_cat_id in self.get_meta_rule()["sub_meta_rules"][_relation]["subject_categories"]:
- _sub_cat_value = str(uuid4())
- if type(self.add_subject_category_value(_sub_cat_id, _sub_cat_value)) is not list:
- return "[Error] Create Requesting Collaboration: No Success"
- _sub_cat_values[_relation] = {_sub_cat_id: _sub_cat_value}
- for _sub in sub_list:
- if type(self.add_subject_assignment(_sub_cat_id, _sub, _sub_cat_value)) is not dict:
- return "[Error] Create Requesting Collaboration: No Success"
-
- for _obj_cat_id in self.get_meta_rule()["sub_meta_rules"][_relation]["object_categories"]:
- if _obj_cat_id == 'action':
- _obj_cat_values[_relation][_obj_cat_id] = act
- else:
- _obj_cat_value = str(uuid4())
- if type(self.add_object_category_value(_obj_cat_id, _obj_cat_value)) is not list:
- return "[Error] Create Requesting Collaboration: No Success"
- if type(self.add_object_assignment(_obj_cat_id, vent_uuid, _obj_cat_value)) is not dict:
- return "[Error] Create Requesting Collaboration: No Success"
- _obj_cat_values[_relation] = {_obj_cat_id: _obj_cat_value}
-
- _rule = self.add_rule(_sub_cat_values, _obj_cat_values)
- if type(_rule) is not dict:
- return "[Error] Create Requesting Collaboration: No Success"
- return {"subject_category_value_dict": _sub_cat_values, "object_category_value_dict": _obj_cat_values,
- "rule": _rule}
-
- def destroy_requesting_collaboration(self, sub_list, vent_uuid, sub_cat_value_dict, obj_cat_value_dict):
- for _relation in self.get_meta_rule()["sub_meta_rules"]:
- for _sub_cat_id in self.get_meta_rule()["sub_meta_rules"][_relation]["subject_categories"]:
- for _sub in sub_list:
- if type(self.del_subject_assignment(_sub_cat_id, _sub, sub_cat_value_dict[_relation][_sub_cat_id]))\
- is not dict:
- return "[Error] Destroy Requesting Collaboration: No Success"
- if type(self.del_subject_category_value(_sub_cat_id, sub_cat_value_dict[_relation][_sub_cat_id])) \
- is not list:
- return "[Error] Destroy Requesting Collaboration: No Success"
-
- for _obj_cat_id in self.get_meta_rule()["sub_meta_rules"][_relation]["object_categories"]:
- if _obj_cat_id == "action":
- pass # TODO: reconsidering the action as object attribute
- else:
- if type(self.del_object_assignment(_obj_cat_id, vent_uuid, obj_cat_value_dict[_relation][_obj_cat_id])) is not dict:
- return "[Error] Destroy Requesting Collaboration: No Success"
- if type(self.del_object_category_value(_obj_cat_id, obj_cat_value_dict[_relation][_obj_cat_id])) is not list:
- return "[Error] Destroy Requesting Collaboration: No Success"
-
- if type(self.del_rule(sub_cat_value_dict, obj_cat_value_dict)) is not dict:
- return "[Error] Destroy Requesting Collaboration: No Success"
- if type(self.del_object(vent_uuid)) is not list:
- return "[Error] Destroy Requesting Collaboration: No Success"
- return "[Destroy Requesting Collaboration] OK"
-
- def create_requested_collaboration(self, vent_uuid, obj_list, act):
- _sub_cat_values = dict()
- _obj_cat_values = dict()
-
- if type(self.add_subject(vent_uuid)) is not list:
- return "[Error] Create Requested Collaboration: No Success"
-
- for _relation in self.get_meta_rule()["sub_meta_rules"]:
- for _sub_cat_id in self.get_meta_rule()["sub_meta_rules"][_relation]["subject_categories"]:
- _sub_cat_value = str(uuid4())
- if type(self.add_subject_category_value(_sub_cat_id, _sub_cat_value)) is not list:
- return "[Error] Create Requested Collaboration: No Success"
- _sub_cat_values[_relation] = {_sub_cat_id: _sub_cat_value}
- if type(self.add_subject_assignment(_sub_cat_id, vent_uuid, _sub_cat_value)) is not dict:
- return "[Error] Create Requested Collaboration: No Success"
-
- for _obj_cat_id in self.get_meta_rule()["sub_meta_rules"][_relation]["object_categories"]:
- if _obj_cat_id == 'action':
- _obj_cat_values[_relation][_obj_cat_id] = act
- else:
- _obj_cat_value = str(uuid4())
- if type(self.add_object_category_value(_obj_cat_id, _obj_cat_value)) is not list:
- return "[Error] Create Requested Collaboration: No Success"
- _obj_cat_values[_relation] = {_obj_cat_id: _obj_cat_value}
- for _obj in obj_list:
- if type(self.add_object_assignment(_obj_cat_id, _obj, _obj_cat_value)) is not dict:
- return "[Error] Create Requested Collaboration: No Success"
-
- _rule = self.add_rule(_sub_cat_values, _obj_cat_values)
- if type(_rule) is not dict:
- return "[Error] Create Requested Collaboration: No Success"
- return {"subject_category_value_dict": _sub_cat_values, "object_category_value_dict": _obj_cat_values,
- "rule": _rule}
-
- def destroy_requested_collaboration(self, vent_uuid, obj_list, sub_cat_value_dict, obj_cat_value_dict):
- for _relation in self.get_meta_rule()["sub_meta_rules"]:
- for _sub_cat_id in self.get_meta_rule()["sub_meta_rules"][_relation]["subject_categories"]:
- if type(self.del_subject_assignment(_sub_cat_id, vent_uuid, sub_cat_value_dict[_relation][_sub_cat_id])) is not dict:
- return "[Error] Destroy Requested Collaboration: No Success"
- if type(self.del_subject_category_value(_sub_cat_id, sub_cat_value_dict[_relation][_sub_cat_id])) is not list:
- return "[Error] Destroy Requested Collaboration: No Success"
-
- for _obj_cat_id in self.get_meta_rule()["sub_meta_rules"][_relation]["object_categories"]:
- if _obj_cat_id == "action":
- pass # TODO: reconsidering the action as object attribute
- else:
- for _obj in obj_list:
- if type(self.del_object_assignment(_obj_cat_id, _obj, obj_cat_value_dict[_relation][_obj_cat_id])) is not dict:
- return "[Error] Destroy Requested Collaboration: No Success"
- if type(self.del_object_category_value(_obj_cat_id, obj_cat_value_dict[_relation][_obj_cat_id])) is not list:
- return "[Error] Destroy Requested Collaboration: No Success"
-
- if type(self.del_rule(sub_cat_value_dict, obj_cat_value_dict)) is not dict:
- return "[Error] Destroy Requested Collaboration: No Success"
- if type(self.del_subject(vent_uuid)) is not list:
- return "[Error] Destroy Requested Collaboration: No Success"
- return "[Destroy Requested Collaboration] OK"
-
- # ---------------- sync_db api ----------------
-
- def get_data(self):
- data = dict()
- data["metadata"] = self.metadata.get_data()
- data["configuration"] = self.configuration.get_data()
- data["perimeter"] = self.perimeter.get_data()
- data["assignment"] = self.assignment.get_data()
- return data
-
- def set_data(self, extension_data):
- self.metadata.set_data(extension_data["metadata"])
- self.configuration.set_data(extension_data["configuration"])
- self.perimeter.set_data(extension_data["perimeter"])
- self.assignment.set_data(extension_data["assignment"])
diff --git a/keystone-moon/keystone/contrib/moon/migrate_repo/versions/001_moon.py b/keystone-moon/keystone/contrib/moon/migrate_repo/versions/001_moon.py
index af4d80bb..4fd26bef 100644
--- a/keystone-moon/keystone/contrib/moon/migrate_repo/versions/001_moon.py
+++ b/keystone-moon/keystone/contrib/moon/migrate_repo/versions/001_moon.py
@@ -21,11 +21,11 @@ def upgrade(migrate_engine):
mysql_charset='utf8')
intra_extension_table.create(migrate_engine, checkfirst=True)
- intra_extension_table.insert().values(id=uuid4().hex, intra_extension={
- 'name': "Root Extension",
- 'description': "The root intra extension",
- 'model': 'admin'
- })
+ # intra_extension_table.insert().values(id=uuid4().hex, intra_extension={
+ # 'name': "Root Extension",
+ # 'description': "The root intra extension",
+ # 'model': 'admin'
+ # })
tenant_table = sql.Table(
'tenants',
@@ -195,8 +195,6 @@ def upgrade(migrate_engine):
mysql_charset='utf8')
rules_table.create(migrate_engine, checkfirst=True)
- # TODO: load root_extension
-
def downgrade(migrate_engine):
meta = sql.MetaData()
diff --git a/keystone-moon/keystone/contrib/moon/routers.py b/keystone-moon/keystone/contrib/moon/routers.py
index 4da3b991..63915092 100644
--- a/keystone-moon/keystone/contrib/moon/routers.py
+++ b/keystone-moon/keystone/contrib/moon/routers.py
@@ -9,7 +9,7 @@ from keystone.contrib.moon import controllers
from keystone.common import wsgi
-class Routers(wsgi.RoutersBase):
+class Routers(wsgi.V3ExtensionRouter):
"""API Endpoints for the Moon extension.
"""
diff --git a/keystone-moon/keystone/contrib/oauth1/backends/sql.py b/keystone-moon/keystone/contrib/oauth1/backends/sql.py
index c6ab6e5a..a7876756 100644
--- a/keystone-moon/keystone/contrib/oauth1/backends/sql.py
+++ b/keystone-moon/keystone/contrib/oauth1/backends/sql.py
@@ -18,9 +18,9 @@ import uuid
from oslo_serialization import jsonutils
from oslo_utils import timeutils
-import six
from keystone.common import sql
+from keystone.common import utils
from keystone.contrib.oauth1 import core
from keystone import exception
from keystone.i18n import _
@@ -58,7 +58,7 @@ class RequestToken(sql.ModelBase, sql.DictBase):
return cls(**user_dict)
def to_dict(self):
- return dict(six.iteritems(self))
+ return dict(self.items())
class AccessToken(sql.ModelBase, sql.DictBase):
@@ -81,7 +81,7 @@ class AccessToken(sql.ModelBase, sql.DictBase):
return cls(**user_dict)
def to_dict(self):
- return dict(six.iteritems(self))
+ return dict(self.items())
class OAuth1(object):
@@ -163,7 +163,7 @@ class OAuth1(object):
if token_duration:
now = timeutils.utcnow()
future = now + datetime.timedelta(seconds=token_duration)
- expiry_date = timeutils.isotime(future, subsecond=True)
+ expiry_date = utils.isotime(future, subsecond=True)
ref = {}
ref['id'] = request_token_id
@@ -225,7 +225,7 @@ class OAuth1(object):
if token_duration:
now = timeutils.utcnow()
future = now + datetime.timedelta(seconds=token_duration)
- expiry_date = timeutils.isotime(future, subsecond=True)
+ expiry_date = utils.isotime(future, subsecond=True)
# add Access Token
ref = {}
diff --git a/keystone-moon/keystone/contrib/oauth1/controllers.py b/keystone-moon/keystone/contrib/oauth1/controllers.py
index fb5d0bc2..d12fc96b 100644
--- a/keystone-moon/keystone/contrib/oauth1/controllers.py
+++ b/keystone-moon/keystone/contrib/oauth1/controllers.py
@@ -20,12 +20,12 @@ from oslo_utils import timeutils
from keystone.common import controller
from keystone.common import dependency
+from keystone.common import utils
from keystone.common import wsgi
from keystone.contrib.oauth1 import core as oauth1
from keystone.contrib.oauth1 import validator
from keystone import exception
from keystone.i18n import _
-from keystone.models import token_model
from keystone import notifications
@@ -84,10 +84,7 @@ class ConsumerCrudV3(controller.V3Controller):
@controller.protected()
def delete_consumer(self, context, consumer_id):
- user_token_ref = token_model.KeystoneToken(
- token_id=context['token_id'],
- token_data=self.token_provider_api.validate_token(
- context['token_id']))
+ user_token_ref = utils.get_token_ref(context)
payload = {'user_id': user_token_ref.user_id,
'consumer_id': consumer_id}
_emit_user_oauth_consumer_token_invalidate(payload)
@@ -382,10 +379,7 @@ class OAuthControllerV3(controller.V3Controller):
authed_roles.add(role['id'])
# verify the authorizing user has the roles
- user_token = token_model.KeystoneToken(
- token_id=context['token_id'],
- token_data=self.token_provider_api.validate_token(
- context['token_id']))
+ user_token = utils.get_token_ref(context)
user_id = user_token.user_id
project_id = req_token['requested_project_id']
user_roles = self.assignment_api.get_roles_for_user_and_project(
diff --git a/keystone-moon/keystone/contrib/oauth1/core.py b/keystone-moon/keystone/contrib/oauth1/core.py
index eeb3e114..d7f64dc4 100644
--- a/keystone-moon/keystone/contrib/oauth1/core.py
+++ b/keystone-moon/keystone/contrib/oauth1/core.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Extensions supporting OAuth1."""
+"""Main entry point into the OAuth1 service."""
from __future__ import absolute_import
@@ -151,6 +151,9 @@ class Manager(manager.Manager):
dynamically calls the backend.
"""
+
+ driver_namespace = 'keystone.oauth1'
+
_ACCESS_TOKEN = "OS-OAUTH1:access_token"
_REQUEST_TOKEN = "OS-OAUTH1:request_token"
_CONSUMER = "OS-OAUTH1:consumer"
diff --git a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py
index a4fbf155..e0305351 100644
--- a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py
+++ b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py
@@ -55,13 +55,3 @@ def upgrade(migrate_engine):
sql.Column('consumer_id', sql.String(64), nullable=False),
sql.Column('expires_at', sql.String(64), nullable=True))
access_token_table.create(migrate_engine, checkfirst=True)
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- # Operations to reverse the above upgrade go here.
- tables = ['consumer', 'request_token', 'access_token']
- for table_name in tables:
- table = sql.Table(table_name, meta, autoload=True)
- table.drop()
diff --git a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py
index d39df8d5..174120e8 100644
--- a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py
+++ b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py
@@ -35,20 +35,3 @@ def upgrade(migrate_engine):
'ref_column': consumer_table.c.id}]
if meta.bind != 'sqlite':
migration_helpers.add_constraints(constraints)
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- consumer_table = sql.Table('consumer', meta, autoload=True)
- request_token_table = sql.Table('request_token', meta, autoload=True)
- access_token_table = sql.Table('access_token', meta, autoload=True)
-
- constraints = [{'table': request_token_table,
- 'fk_column': 'consumer_id',
- 'ref_column': consumer_table.c.id},
- {'table': access_token_table,
- 'fk_column': 'consumer_id',
- 'ref_column': consumer_table.c.id}]
- if migrate_engine.name != 'sqlite':
- migration_helpers.remove_constraints(constraints)
diff --git a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py
index e1cf8843..cf6ffb7c 100644
--- a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py
+++ b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py
@@ -20,10 +20,3 @@ def upgrade(migrate_engine):
meta.bind = migrate_engine
user_table = sql.Table('consumer', meta, autoload=True)
user_table.c.description.alter(nullable=True)
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- user_table = sql.Table('consumer', meta, autoload=True)
- user_table.c.description.alter(nullable=False)
diff --git a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py
index 6f1e2e81..6934eb6f 100644
--- a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py
+++ b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py
@@ -23,13 +23,3 @@ def upgrade(migrate_engine):
request_token_table.c.requested_roles.alter(name="role_ids")
access_token_table = sql.Table('access_token', meta, autoload=True)
access_token_table.c.requested_roles.alter(name="role_ids")
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- request_token_table = sql.Table('request_token', meta, autoload=True)
- request_token_table.c.role_ids.alter(nullable=False)
- request_token_table.c.role_ids.alter(name="requested_roles")
- access_token_table = sql.Table('access_token', meta, autoload=True)
- access_token_table.c.role_ids.alter(name="requested_roles")
diff --git a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/005_consumer_id_index.py b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/005_consumer_id_index.py
index 428971f8..0627d21c 100644
--- a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/005_consumer_id_index.py
+++ b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/005_consumer_id_index.py
@@ -26,17 +26,10 @@ def upgrade(migrate_engine):
# indexes create automatically. That those indexes will have different
# names, depending on version of MySQL used. We shoud make this naming
# consistent, by reverting index name to a consistent condition.
- if any(i for i in table.indexes if i.columns.keys() == ['consumer_id']
+ if any(i for i in table.indexes if
+ list(i.columns.keys()) == ['consumer_id']
and i.name != 'consumer_id'):
# NOTE(i159): by this action will be made re-creation of an index
# with the new name. This can be considered as renaming under the
# MySQL rules.
sa.Index('consumer_id', table.c.consumer_id).create()
-
-
-def downgrade(migrate_engine):
- # NOTE(i159): index exists only in MySQL schemas, and got an inconsistent
- # name only when MySQL 5.5 renamed it after re-creation
- # (during migrations). So we just fixed inconsistency, there is no
- # necessity to revert it.
- pass
diff --git a/keystone-moon/keystone/contrib/oauth1/routers.py b/keystone-moon/keystone/contrib/oauth1/routers.py
index 35619ede..4b772eb5 100644
--- a/keystone-moon/keystone/contrib/oauth1/routers.py
+++ b/keystone-moon/keystone/contrib/oauth1/routers.py
@@ -44,17 +44,17 @@ class OAuth1Extension(wsgi.V3ExtensionRouter):
# Basic admin-only consumer crud
POST /OS-OAUTH1/consumers
GET /OS-OAUTH1/consumers
- PATCH /OS-OAUTH1/consumers/$consumer_id
- GET /OS-OAUTH1/consumers/$consumer_id
- DELETE /OS-OAUTH1/consumers/$consumer_id
+ PATCH /OS-OAUTH1/consumers/{consumer_id}
+ GET /OS-OAUTH1/consumers/{consumer_id}
+ DELETE /OS-OAUTH1/consumers/{consumer_id}
# User access token crud
- GET /users/$user_id/OS-OAUTH1/access_tokens
- GET /users/$user_id/OS-OAUTH1/access_tokens/$access_token_id
+ GET /users/{user_id}/OS-OAUTH1/access_tokens
+ GET /users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}
GET /users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/roles
GET /users/{user_id}/OS-OAUTH1/access_tokens
/{access_token_id}/roles/{role_id}
- DELETE /users/$user_id/OS-OAUTH1/access_tokens/$access_token_id
+ DELETE /users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}
# OAuth interfaces
POST /OS-OAUTH1/request_token # create a request token
diff --git a/keystone-moon/keystone/contrib/revoke/backends/kvs.py b/keystone-moon/keystone/contrib/revoke/backends/kvs.py
index cc41fbee..349ed6e3 100644
--- a/keystone-moon/keystone/contrib/revoke/backends/kvs.py
+++ b/keystone-moon/keystone/contrib/revoke/backends/kvs.py
@@ -13,12 +13,12 @@
import datetime
from oslo_config import cfg
+from oslo_log import versionutils
from oslo_utils import timeutils
from keystone.common import kvs
from keystone.contrib import revoke
from keystone import exception
-from keystone.openstack.common import versionutils
CONF = cfg.CONF
@@ -45,29 +45,30 @@ class Revoke(revoke.Driver):
except exception.NotFound:
return []
- def _prune_expired_events_and_get(self, last_fetch=None, new_event=None):
- pruned = []
+ def list_events(self, last_fetch=None):
results = []
+
+ with self._store.get_lock(_EVENT_KEY):
+ events = self._list_events()
+
+ for event in events:
+ revoked_at = event.revoked_at
+ if last_fetch is None or revoked_at > last_fetch:
+ results.append(event)
+ return results
+
+ def revoke(self, event):
+ pruned = []
expire_delta = datetime.timedelta(seconds=CONF.token.expiration)
oldest = timeutils.utcnow() - expire_delta
- # TODO(ayoung): Store the time of the oldest event so that the
- # prune process can be skipped if none of the events have timed out.
+
with self._store.get_lock(_EVENT_KEY) as lock:
events = self._list_events()
- if new_event is not None:
- events.append(new_event)
+ if event:
+ events.append(event)
for event in events:
revoked_at = event.revoked_at
if revoked_at > oldest:
pruned.append(event)
- if last_fetch is None or revoked_at > last_fetch:
- results.append(event)
self._store.set(_EVENT_KEY, pruned, lock)
- return results
-
- def list_events(self, last_fetch=None):
- return self._prune_expired_events_and_get(last_fetch=last_fetch)
-
- def revoke(self, event):
- self._prune_expired_events_and_get(new_event=event)
diff --git a/keystone-moon/keystone/contrib/revoke/backends/sql.py b/keystone-moon/keystone/contrib/revoke/backends/sql.py
index 1b0cde1e..dd7fdd19 100644
--- a/keystone-moon/keystone/contrib/revoke/backends/sql.py
+++ b/keystone-moon/keystone/contrib/revoke/backends/sql.py
@@ -33,7 +33,7 @@ class RevocationEvent(sql.ModelBase, sql.ModelDictMixin):
access_token_id = sql.Column(sql.String(64))
issued_before = sql.Column(sql.DateTime(), nullable=False)
expires_at = sql.Column(sql.DateTime())
- revoked_at = sql.Column(sql.DateTime(), nullable=False)
+ revoked_at = sql.Column(sql.DateTime(), nullable=False, index=True)
audit_id = sql.Column(sql.String(32))
audit_chain_id = sql.Column(sql.String(32))
@@ -81,7 +81,6 @@ class Revoke(revoke.Driver):
session.flush()
def list_events(self, last_fetch=None):
- self._prune_expired_events()
session = sql.get_session()
query = session.query(RevocationEvent).order_by(
RevocationEvent.revoked_at)
@@ -102,3 +101,4 @@ class Revoke(revoke.Driver):
session = sql.get_session()
with session.begin():
session.add(record)
+ self._prune_expired_events()
diff --git a/keystone-moon/keystone/contrib/revoke/core.py b/keystone-moon/keystone/contrib/revoke/core.py
index c7335690..e1ab87c8 100644
--- a/keystone-moon/keystone/contrib/revoke/core.py
+++ b/keystone-moon/keystone/contrib/revoke/core.py
@@ -10,11 +10,14 @@
# License for the specific language governing permissions and limitations
# under the License.
+"""Main entry point into the Revoke service."""
+
import abc
import datetime
from oslo_config import cfg
from oslo_log import log
+from oslo_log import versionutils
from oslo_utils import timeutils
import six
@@ -26,7 +29,6 @@ from keystone.contrib.revoke import model
from keystone import exception
from keystone.i18n import _
from keystone import notifications
-from keystone.openstack.common import versionutils
CONF = cfg.CONF
@@ -64,12 +66,17 @@ def revoked_before_cutoff_time():
@dependency.provider('revoke_api')
class Manager(manager.Manager):
- """Revoke API Manager.
+ """Default pivot point for the Revoke backend.
Performs common logic for recording revocations.
+ See :mod:`keystone.common.manager.Manager` for more details on
+ how this dynamically calls the backend.
+
"""
+ driver_namespace = 'keystone.revoke'
+
def __init__(self):
super(Manager, self).__init__(CONF.revoke.driver)
self._register_listeners()
@@ -109,11 +116,12 @@ class Manager(manager.Manager):
self.revoke(
model.RevokeEvent(access_token_id=payload['resource_info']))
- def _group_callback(self, service, resource_type, operation, payload):
- user_ids = (u['id'] for u in self.identity_api.list_users_in_group(
- payload['resource_info']))
- for uid in user_ids:
- self.revoke(model.RevokeEvent(user_id=uid))
+ def _role_assignment_callback(self, service, resource_type, operation,
+ payload):
+ info = payload['resource_info']
+ self.revoke_by_grant(role_id=info['role_id'], user_id=info['user_id'],
+ domain_id=info.get('domain_id'),
+ project_id=info.get('project_id'))
def _register_listeners(self):
callbacks = {
@@ -124,6 +132,7 @@ class Manager(manager.Manager):
['role', self._role_callback],
['user', self._user_callback],
['project', self._project_callback],
+ ['role_assignment', self._role_assignment_callback]
],
notifications.ACTIONS.disabled: [
['user', self._user_callback],
@@ -136,7 +145,7 @@ class Manager(manager.Manager):
]
}
- for event, cb_info in six.iteritems(callbacks):
+ for event, cb_info in callbacks.items():
for resource_type, callback_fns in cb_info:
notifications.register_event_callback(event, resource_type,
callback_fns)
diff --git a/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/001_revoke_table.py b/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/001_revoke_table.py
index 7927ce0c..8b59010e 100644
--- a/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/001_revoke_table.py
+++ b/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/001_revoke_table.py
@@ -34,14 +34,3 @@ def upgrade(migrate_engine):
sql.Column('expires_at', sql.DateTime()),
sql.Column('revoked_at', sql.DateTime(), index=True, nullable=False))
service_table.create(migrate_engine, checkfirst=True)
-
-
-def downgrade(migrate_engine):
- # Operations to reverse the above upgrade go here.
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- tables = ['revocation_event']
- for t in tables:
- table = sql.Table(t, meta, autoload=True)
- table.drop(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/002_add_audit_id_and_chain_to_revoke_table.py b/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/002_add_audit_id_and_chain_to_revoke_table.py
index bee6fb2a..b6d821d7 100644
--- a/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/002_add_audit_id_and_chain_to_revoke_table.py
+++ b/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/002_add_audit_id_and_chain_to_revoke_table.py
@@ -26,12 +26,3 @@ def upgrade(migrate_engine):
nullable=True)
event_table.create_column(audit_id_column)
event_table.create_column(audit_chain_column)
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- event_table = sql.Table(_TABLE_NAME, meta, autoload=True)
- event_table.drop_column('audit_id')
- event_table.drop_column('audit_chain_id')
diff --git a/keystone-moon/keystone/contrib/revoke/model.py b/keystone-moon/keystone/contrib/revoke/model.py
index 5e92042d..1a23d57d 100644
--- a/keystone-moon/keystone/contrib/revoke/model.py
+++ b/keystone-moon/keystone/contrib/revoke/model.py
@@ -11,6 +11,9 @@
# under the License.
from oslo_utils import timeutils
+from six.moves import map
+
+from keystone.common import utils
# The set of attributes common between the RevokeEvent
@@ -43,6 +46,15 @@ _TOKEN_KEYS = ['identity_domain_id',
'trustor_id',
'trustee_id']
+# Alternative names to be checked in token for every field in
+# revoke tree.
+ALTERNATIVES = {
+ 'user_id': ['user_id', 'trustor_id', 'trustee_id'],
+ 'domain_id': ['identity_domain_id', 'assignment_domain_id'],
+ # For a domain-scoped token, the domain is in assignment_domain_id.
+ 'domain_scope_id': ['assignment_domain_id', ],
+}
+
REVOKE_KEYS = _NAMES + _EVENT_ARGS
@@ -100,10 +112,10 @@ class RevokeEvent(object):
if self.consumer_id is not None:
event['OS-OAUTH1:access_token_id'] = self.access_token_id
if self.expires_at is not None:
- event['expires_at'] = timeutils.isotime(self.expires_at)
+ event['expires_at'] = utils.isotime(self.expires_at)
if self.issued_before is not None:
- event['issued_before'] = timeutils.isotime(self.issued_before,
- subsecond=True)
+ event['issued_before'] = utils.isotime(self.issued_before,
+ subsecond=True)
return event
def key_for_name(self, name):
@@ -111,7 +123,7 @@ class RevokeEvent(object):
def attr_keys(event):
- return map(event.key_for_name, _EVENT_NAMES)
+ return list(map(event.key_for_name, _EVENT_NAMES))
class RevokeTree(object):
@@ -176,7 +188,52 @@ class RevokeTree(object):
del parent[key]
def add_events(self, revoke_events):
- return map(self.add_event, revoke_events or [])
+ return list(map(self.add_event, revoke_events or []))
+
+ @staticmethod
+ def _next_level_keys(name, token_data):
+ """Generate keys based on current field name and token data
+
+ Generate all keys to look for in the next iteration of revocation
+ event tree traversal.
+ """
+ yield '*'
+ if name == 'role_id':
+ # Roles are very special since a token has a list of them.
+ # If the revocation event matches any one of them,
+ # revoke the token.
+ for role_id in token_data.get('roles', []):
+ yield role_id
+ else:
+            # For other fields we try to get any branch that concurs
+ # with any alternative field in the token.
+ for alt_name in ALTERNATIVES.get(name, [name]):
+ yield token_data[alt_name]
+
+ def _search(self, revoke_map, names, token_data):
+ """Search for revocation event by token_data
+
+ Traverse the revocation events tree looking for event matching token
+ data issued after the token.
+ """
+ if not names:
+ # The last (leaf) level is checked in a special way because we
+ # verify issued_at field differently.
+ try:
+ return revoke_map['issued_before'] > token_data['issued_at']
+ except KeyError:
+ return False
+
+ name, remaining_names = names[0], names[1:]
+
+ for key in self._next_level_keys(name, token_data):
+ subtree = revoke_map.get('%s=%s' % (name, key))
+ if subtree and self._search(subtree, remaining_names, token_data):
+ return True
+
+ # If we made it out of the loop then no element in revocation tree
+ # corresponds to our token and it is good.
+ return False
def is_revoked(self, token_data):
"""Check if a token matches the revocation event
@@ -195,58 +252,7 @@ class RevokeTree(object):
'consumer_id', 'access_token_id'
"""
- # Alternative names to be checked in token for every field in
- # revoke tree.
- alternatives = {
- 'user_id': ['user_id', 'trustor_id', 'trustee_id'],
- 'domain_id': ['identity_domain_id', 'assignment_domain_id'],
- # For a domain-scoped token, the domain is in assignment_domain_id.
- 'domain_scope_id': ['assignment_domain_id', ],
- }
- # Contains current forest (collection of trees) to be checked.
- partial_matches = [self.revoke_map]
- # We iterate over every layer of our revoke tree (except the last one).
- for name in _EVENT_NAMES:
- # bundle is the set of partial matches for the next level down
- # the tree
- bundle = []
- wildcard = '%s=*' % (name,)
- # For every tree in current forest.
- for tree in partial_matches:
- # If there is wildcard node on current level we take it.
- bundle.append(tree.get(wildcard))
- if name == 'role_id':
- # Roles are very special since a token has a list of them.
- # If the revocation event matches any one of them,
- # revoke the token.
- for role_id in token_data.get('roles', []):
- bundle.append(tree.get('role_id=%s' % role_id))
- else:
- # For other fields we try to get any branch that concur
- # with any alternative field in the token.
- for alt_name in alternatives.get(name, [name]):
- bundle.append(
- tree.get('%s=%s' % (name, token_data[alt_name])))
- # tree.get returns `None` if there is no match, so `bundle.append`
- # adds a 'None' entry. This call remoes the `None` entries.
- partial_matches = [x for x in bundle if x is not None]
- if not partial_matches:
- # If we end up with no branches to follow means that the token
- # is definitely not in the revoke tree and all further
- # iterations will be for nothing.
- return False
-
- # The last (leaf) level is checked in a special way because we verify
- # issued_at field differently.
- for leaf in partial_matches:
- try:
- if leaf['issued_before'] > token_data['issued_at']:
- return True
- except KeyError:
- pass
- # If we made it out of the loop then no element in revocation tree
- # corresponds to our token and it is good.
- return False
+ return self._search(self.revoke_map, _EVENT_NAMES, token_data)
def build_token_values_v2(access, default_domain_id):
diff --git a/keystone-moon/keystone/contrib/s3/core.py b/keystone-moon/keystone/contrib/s3/core.py
index 34095bf4..d3e06acc 100644
--- a/keystone-moon/keystone/contrib/s3/core.py
+++ b/keystone-moon/keystone/contrib/s3/core.py
@@ -25,6 +25,8 @@ import base64
import hashlib
import hmac
+import six
+
from keystone.common import extension
from keystone.common import json_home
from keystone.common import utils
@@ -32,6 +34,7 @@ from keystone.common import wsgi
from keystone.contrib.ec2 import controllers
from keystone import exception
+
EXTENSION_DATA = {
'name': 'OpenStack S3 API',
'namespace': 'http://docs.openstack.org/identity/api/ext/'
@@ -65,9 +68,15 @@ class S3Extension(wsgi.V3ExtensionRouter):
class S3Controller(controllers.Ec2Controller):
def check_signature(self, creds_ref, credentials):
msg = base64.urlsafe_b64decode(str(credentials['token']))
- key = str(creds_ref['secret'])
- signed = base64.encodestring(
- hmac.new(key, msg, hashlib.sha1).digest()).strip()
+ key = str(creds_ref['secret']).encode('utf-8')
+
+ if six.PY2:
+ b64_encode = base64.encodestring
+ else:
+ b64_encode = base64.encodebytes
+
+ signed = b64_encode(
+ hmac.new(key, msg, hashlib.sha1).digest()).decode('utf-8').strip()
if not utils.auth_str_equal(credentials['signature'], signed):
raise exception.Unauthorized('Credential signature mismatch')
diff --git a/keystone-moon/keystone/controllers.py b/keystone-moon/keystone/controllers.py
index 12f13c77..085c1fb0 100644
--- a/keystone-moon/keystone/controllers.py
+++ b/keystone-moon/keystone/controllers.py
@@ -63,7 +63,7 @@ class Extensions(wsgi.Application):
return None
def get_extensions_info(self, context):
- return {'extensions': {'values': self.extensions.values()}}
+ return {'extensions': {'values': list(self.extensions.values())}}
def get_extension_info(self, context, extension_alias):
try:
@@ -146,9 +146,9 @@ class Version(wsgi.Application):
if 'v3' in _VERSIONS:
versions['v3'] = {
- 'id': 'v3.0',
+ 'id': 'v3.4',
'status': 'stable',
- 'updated': '2013-03-06T00:00:00Z',
+ 'updated': '2015-03-30T00:00:00Z',
'links': [
{
'rel': 'self',
@@ -177,7 +177,7 @@ class Version(wsgi.Application):
versions = self._get_versions_list(context)
return wsgi.render_response(status=(300, 'Multiple Choices'), body={
'versions': {
- 'values': versions.values()
+ 'values': list(versions.values())
}
})
diff --git a/keystone-moon/keystone/credential/core.py b/keystone-moon/keystone/credential/core.py
index d3354ea3..2368439e 100644
--- a/keystone-moon/keystone/credential/core.py
+++ b/keystone-moon/keystone/credential/core.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Main entry point into the Credentials service."""
+"""Main entry point into the Credential service."""
import abc
@@ -40,6 +40,8 @@ class Manager(manager.Manager):
"""
+ driver_namespace = 'keystone.credential'
+
def __init__(self):
super(Manager, self).__init__(CONF.credential.driver)
diff --git a/keystone-moon/keystone/endpoint_policy/__init__.py b/keystone-moon/keystone/endpoint_policy/__init__.py
new file mode 100644
index 00000000..c8ae5e68
--- /dev/null
+++ b/keystone-moon/keystone/endpoint_policy/__init__.py
@@ -0,0 +1,14 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.endpoint_policy.core import * # noqa
+from keystone.endpoint_policy import routers # noqa
diff --git a/keystone-moon/keystone/endpoint_policy/backends/__init__.py b/keystone-moon/keystone/endpoint_policy/backends/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/endpoint_policy/backends/__init__.py
diff --git a/keystone-moon/keystone/endpoint_policy/backends/sql.py b/keystone-moon/keystone/endpoint_policy/backends/sql.py
new file mode 100644
index 00000000..484444f1
--- /dev/null
+++ b/keystone-moon/keystone/endpoint_policy/backends/sql.py
@@ -0,0 +1,140 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import sqlalchemy
+
+from keystone.common import sql
+from keystone import exception
+
+
class PolicyAssociation(sql.ModelBase, sql.ModelDictMixin):
    """SQL model mapping a policy to an endpoint, service, or region."""

    __tablename__ = 'policy_association'
    attributes = ['policy_id', 'endpoint_id', 'region_id', 'service_id']
    # The id column is never exposed outside this module. It only exists to
    # provide a primary key, given that the real columns we would like to use
    # (endpoint_id, service_id, region_id) can be null
    id = sql.Column(sql.String(64), primary_key=True)
    policy_id = sql.Column(sql.String(64), nullable=False)
    endpoint_id = sql.Column(sql.String(64), nullable=True)
    service_id = sql.Column(sql.String(64), nullable=True)
    region_id = sql.Column(sql.String(64), nullable=True)
    __table_args__ = (sql.UniqueConstraint('endpoint_id', 'service_id',
                                           'region_id'), {})

    def to_dict(self):
        """Return the model's attributes as a dictionary.

        Overrides the standard method so that the surrogate ``id`` primary
        key stays hidden from callers.

        """
        return {attr: getattr(self, attr) for attr in self.attributes}
+
+
class EndpointPolicy(object):
    """SQL backend for policy <-> endpoint/service/region associations."""

    def create_policy_association(self, policy_id, endpoint_id=None,
                                  service_id=None, region_id=None):
        with sql.transaction() as session:
            try:
                # If a row already exists for this target, just repoint it
                # at the new policy.
                association = session.query(PolicyAssociation).filter_by(
                    endpoint_id=endpoint_id,
                    service_id=service_id,
                    region_id=region_id).one()
                association.policy_id = policy_id
            except sql.NotFound:
                session.add(PolicyAssociation(id=uuid.uuid4().hex,
                                              policy_id=policy_id,
                                              endpoint_id=endpoint_id,
                                              service_id=service_id,
                                              region_id=region_id))

    def check_policy_association(self, policy_id, endpoint_id=None,
                                 service_id=None, region_id=None):
        to_match = sqlalchemy.and_(
            PolicyAssociation.policy_id == policy_id,
            PolicyAssociation.endpoint_id == endpoint_id,
            PolicyAssociation.service_id == service_id,
            PolicyAssociation.region_id == region_id)

        # NOTE(henry-nash): Getting a single value to save object
        # management overhead.
        with sql.transaction() as session:
            if session.query(PolicyAssociation.id).filter(
                    to_match).distinct().count() == 0:
                raise exception.PolicyAssociationNotFound()

    def delete_policy_association(self, policy_id, endpoint_id=None,
                                  service_id=None, region_id=None):
        self._delete_associations(policy_id=policy_id,
                                  endpoint_id=endpoint_id,
                                  service_id=service_id,
                                  region_id=region_id)

    def get_policy_association(self, endpoint_id=None,
                               service_id=None, region_id=None):
        to_match = sqlalchemy.and_(
            PolicyAssociation.endpoint_id == endpoint_id,
            PolicyAssociation.service_id == service_id,
            PolicyAssociation.region_id == region_id)

        try:
            with sql.transaction() as session:
                policy_id = session.query(PolicyAssociation.policy_id).filter(
                    to_match).distinct().one()
                return {'policy_id': policy_id}
        except sql.NotFound:
            raise exception.PolicyAssociationNotFound()

    def list_associations_for_policy(self, policy_id):
        with sql.transaction() as session:
            refs = session.query(PolicyAssociation).filter_by(
                policy_id=policy_id).all()
            return [ref.to_dict() for ref in refs]

    def _delete_associations(self, **filters):
        # Shared implementation behind the delete_association_by_* methods.
        with sql.transaction() as session:
            session.query(PolicyAssociation).filter_by(**filters).delete()

    def delete_association_by_endpoint(self, endpoint_id):
        self._delete_associations(endpoint_id=endpoint_id)

    def delete_association_by_service(self, service_id):
        self._delete_associations(service_id=service_id)

    def delete_association_by_region(self, region_id):
        self._delete_associations(region_id=region_id)

    def delete_association_by_policy(self, policy_id):
        self._delete_associations(policy_id=policy_id)
diff --git a/keystone-moon/keystone/endpoint_policy/controllers.py b/keystone-moon/keystone/endpoint_policy/controllers.py
new file mode 100644
index 00000000..b96834dc
--- /dev/null
+++ b/keystone-moon/keystone/endpoint_policy/controllers.py
@@ -0,0 +1,166 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import controller
+from keystone.common import dependency
+from keystone import notifications
+
+
@dependency.requires('policy_api', 'catalog_api', 'endpoint_policy_api')
class EndpointPolicyV3Controller(controller.V3Controller):
    collection_name = 'endpoints'
    member_name = 'endpoint'

    def __init__(self):
        super(EndpointPolicyV3Controller, self).__init__()
        # Keep associations consistent with the entities they reference by
        # listening for deletion of endpoints, services, regions and
        # policies.
        for resource_type, callback in (
                ('endpoint', self._on_endpoint_delete),
                ('service', self._on_service_delete),
                ('region', self._on_region_delete),
                ('policy', self._on_policy_delete)):
            notifications.register_event_callback(
                'deleted', resource_type, callback)

    def _on_endpoint_delete(self, service, resource_type, operation, payload):
        self.endpoint_policy_api.delete_association_by_endpoint(
            payload['resource_info'])

    def _on_service_delete(self, service, resource_type, operation, payload):
        self.endpoint_policy_api.delete_association_by_service(
            payload['resource_info'])

    def _on_region_delete(self, service, resource_type, operation, payload):
        self.endpoint_policy_api.delete_association_by_region(
            payload['resource_info'])

    def _on_policy_delete(self, service, resource_type, operation, payload):
        self.endpoint_policy_api.delete_association_by_policy(
            payload['resource_info'])

    def _assert_endpoint_refs(self, policy_id, endpoint_id):
        # Both referenced entities must exist (raises NotFound otherwise).
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_endpoint(endpoint_id)

    def _assert_service_refs(self, policy_id, service_id):
        # Both referenced entities must exist (raises NotFound otherwise).
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_service(service_id)

    def _assert_region_service_refs(self, policy_id, service_id, region_id):
        # All referenced entities must exist (raises NotFound otherwise).
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_service(service_id)
        self.catalog_api.get_region(region_id)

    @controller.protected()
    def create_policy_association_for_endpoint(self, context,
                                               policy_id, endpoint_id):
        """Create an association between a policy and an endpoint."""
        self._assert_endpoint_refs(policy_id, endpoint_id)
        self.endpoint_policy_api.create_policy_association(
            policy_id, endpoint_id=endpoint_id)

    @controller.protected()
    def check_policy_association_for_endpoint(self, context,
                                              policy_id, endpoint_id):
        """Check an association between a policy and an endpoint."""
        self._assert_endpoint_refs(policy_id, endpoint_id)
        self.endpoint_policy_api.check_policy_association(
            policy_id, endpoint_id=endpoint_id)

    @controller.protected()
    def delete_policy_association_for_endpoint(self, context,
                                               policy_id, endpoint_id):
        """Delete an association between a policy and an endpoint."""
        self._assert_endpoint_refs(policy_id, endpoint_id)
        self.endpoint_policy_api.delete_policy_association(
            policy_id, endpoint_id=endpoint_id)

    @controller.protected()
    def create_policy_association_for_service(self, context,
                                              policy_id, service_id):
        """Create an association between a policy and a service."""
        self._assert_service_refs(policy_id, service_id)
        self.endpoint_policy_api.create_policy_association(
            policy_id, service_id=service_id)

    @controller.protected()
    def check_policy_association_for_service(self, context,
                                             policy_id, service_id):
        """Check an association between a policy and a service."""
        self._assert_service_refs(policy_id, service_id)
        self.endpoint_policy_api.check_policy_association(
            policy_id, service_id=service_id)

    @controller.protected()
    def delete_policy_association_for_service(self, context,
                                              policy_id, service_id):
        """Delete an association between a policy and a service."""
        self._assert_service_refs(policy_id, service_id)
        self.endpoint_policy_api.delete_policy_association(
            policy_id, service_id=service_id)

    @controller.protected()
    def create_policy_association_for_region_and_service(
            self, context, policy_id, service_id, region_id):
        """Create an association between a policy and region+service."""
        self._assert_region_service_refs(policy_id, service_id, region_id)
        self.endpoint_policy_api.create_policy_association(
            policy_id, service_id=service_id, region_id=region_id)

    @controller.protected()
    def check_policy_association_for_region_and_service(
            self, context, policy_id, service_id, region_id):
        """Check an association between a policy and region+service."""
        self._assert_region_service_refs(policy_id, service_id, region_id)
        self.endpoint_policy_api.check_policy_association(
            policy_id, service_id=service_id, region_id=region_id)

    @controller.protected()
    def delete_policy_association_for_region_and_service(
            self, context, policy_id, service_id, region_id):
        """Delete an association between a policy and region+service."""
        self._assert_region_service_refs(policy_id, service_id, region_id)
        self.endpoint_policy_api.delete_policy_association(
            policy_id, service_id=service_id, region_id=region_id)

    @controller.protected()
    def get_policy_for_endpoint(self, context, endpoint_id):
        """Get the effective policy for an endpoint."""
        self.catalog_api.get_endpoint(endpoint_id)
        ref = self.endpoint_policy_api.get_policy_for_endpoint(endpoint_id)
        # NOTE(henry-nash): since the collection and member for this class is
        # set to endpoints, we have to handle wrapping this policy entity
        # ourselves.
        self._add_self_referential_link(context, ref)
        return {'policy': ref}

    # NOTE(henry-nash): As in the catalog controller, we must ensure that the
    # legacy_endpoint_id does not escape.

    @classmethod
    def filter_endpoint(cls, ref):
        ref.pop('legacy_endpoint_id', None)
        return ref

    @classmethod
    def wrap_member(cls, context, ref):
        return super(EndpointPolicyV3Controller, cls).wrap_member(
            context, cls.filter_endpoint(ref))

    @controller.protected()
    def list_endpoints_for_policy(self, context, policy_id):
        """List endpoints with the effective association to a policy."""
        self.policy_api.get_policy(policy_id)
        refs = self.endpoint_policy_api.list_endpoints_for_policy(policy_id)
        return EndpointPolicyV3Controller.wrap_collection(context, refs)
diff --git a/keystone-moon/keystone/endpoint_policy/core.py b/keystone-moon/keystone/endpoint_policy/core.py
new file mode 100644
index 00000000..3e8026e6
--- /dev/null
+++ b/keystone-moon/keystone/endpoint_policy/core.py
@@ -0,0 +1,433 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+
+from oslo_config import cfg
+from oslo_log import log
+import six
+
+from keystone.common import dependency
+from keystone.common import manager
+from keystone import exception
+from keystone.i18n import _, _LE, _LW
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
@dependency.provider('endpoint_policy_api')
@dependency.requires('catalog_api', 'policy_api')
class Manager(manager.Manager):
    """Default pivot point for the Endpoint Policy backend.

    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.

    """

    driver_namespace = 'keystone.endpoint_policy'

    def __init__(self):
        super(Manager, self).__init__(CONF.endpoint_policy.driver)

    def _assert_valid_association(self, endpoint_id, service_id, region_id):
        """Assert that the association is supported.

        There are three types of association supported:

        - Endpoint (in which case service and region must be None)
        - Service and region (in which case endpoint must be None)
        - Service (in which case endpoint and region must be None)

        :raises keystone.exception.InvalidPolicyAssociation: if the
            combination of arguments matches none of the three supported
            association types.

        """
        if (endpoint_id is not None and
                service_id is None and region_id is None):
            return
        if (service_id is not None and region_id is not None and
                endpoint_id is None):
            return
        if (service_id is not None and
                endpoint_id is None and region_id is None):
            return

        raise exception.InvalidPolicyAssociation(endpoint_id=endpoint_id,
                                                 service_id=service_id,
                                                 region_id=region_id)

    def create_policy_association(self, policy_id, endpoint_id=None,
                                  service_id=None, region_id=None):
        """Create a policy association after validating its type."""
        self._assert_valid_association(endpoint_id, service_id, region_id)
        self.driver.create_policy_association(policy_id, endpoint_id,
                                              service_id, region_id)

    def check_policy_association(self, policy_id, endpoint_id=None,
                                 service_id=None, region_id=None):
        """Check a policy association after validating its type."""
        self._assert_valid_association(endpoint_id, service_id, region_id)
        self.driver.check_policy_association(policy_id, endpoint_id,
                                             service_id, region_id)

    def delete_policy_association(self, policy_id, endpoint_id=None,
                                  service_id=None, region_id=None):
        """Delete a policy association after validating its type."""
        self._assert_valid_association(endpoint_id, service_id, region_id)
        self.driver.delete_policy_association(policy_id, endpoint_id,
                                              service_id, region_id)

    def list_endpoints_for_policy(self, policy_id):
        """Return all endpoints the given policy effectively applies to."""

        def _get_endpoint(endpoint_id, policy_id):
            try:
                return self.catalog_api.get_endpoint(endpoint_id)
            except exception.EndpointNotFound:
                msg = _LW('Endpoint %(endpoint_id)s referenced in '
                          'association for policy %(policy_id)s not found.')
                LOG.warning(msg, {'policy_id': policy_id,
                                  'endpoint_id': endpoint_id})
                raise

        def _get_endpoints_for_service(service_id, endpoints):
            # TODO(henry-nash): Consider optimizing this in the future by
            # adding an explicit list_endpoints_for_service to the catalog API.
            return [ep for ep in endpoints if ep['service_id'] == service_id]

        def _get_endpoints_for_service_and_region(
                service_id, region_id, endpoints, regions):
            # TODO(henry-nash): Consider optimizing this in the future.
            # The lack of a two-way pointer in the region tree structure
            # makes this somewhat inefficient.

            def _recursively_get_endpoints_for_region(
                    region_id, service_id, endpoint_list, region_list,
                    endpoints_found, regions_examined):
                """Recursively search down a region tree for endpoints.

                :param region_id: the point in the tree to examine
                :param service_id: the service we are interested in
                :param endpoint_list: list of all endpoints
                :param region_list: list of all regions
                :param endpoints_found: list of matching endpoints found so
                                        far - which will be updated if more are
                                        found in this iteration
                :param regions_examined: list of regions we have already looked
                                         at - used to spot illegal circular
                                         references in the tree to avoid never
                                         completing search
                :returns: list of endpoints that match

                """

                if region_id in regions_examined:
                    msg = _LE('Circular reference or a repeated entry found '
                              'in region tree - %(region_id)s.')
                    # Use the local region_id; the enclosing `ref` is a plain
                    # dict, so `ref.region_id` would raise AttributeError.
                    LOG.error(msg, {'region_id': region_id})
                    return

                regions_examined.append(region_id)
                endpoints_found += (
                    [ep for ep in endpoint_list if
                     ep['service_id'] == service_id and
                     ep['region_id'] == region_id])

                for region in region_list:
                    if region['parent_region_id'] == region_id:
                        _recursively_get_endpoints_for_region(
                            region['id'], service_id, endpoints, regions,
                            endpoints_found, regions_examined)

            endpoints_found = []
            regions_examined = []

            # Now walk down the region tree
            _recursively_get_endpoints_for_region(
                region_id, service_id, endpoints, regions,
                endpoints_found, regions_examined)

            return endpoints_found

        matching_endpoints = []
        endpoints = self.catalog_api.list_endpoints()
        regions = self.catalog_api.list_regions()
        for ref in self.driver.list_associations_for_policy(policy_id):
            if ref.get('endpoint_id') is not None:
                matching_endpoints.append(
                    _get_endpoint(ref['endpoint_id'], policy_id))
                continue

            if (ref.get('service_id') is not None and
                    ref.get('region_id') is None):
                matching_endpoints += _get_endpoints_for_service(
                    ref['service_id'], endpoints)
                continue

            if (ref.get('service_id') is not None and
                    ref.get('region_id') is not None):
                matching_endpoints += (
                    _get_endpoints_for_service_and_region(
                        ref['service_id'], ref['region_id'],
                        endpoints, regions))
                continue

            msg = _LW('Unsupported policy association found - '
                      'Policy %(policy_id)s, Endpoint %(endpoint_id)s, '
                      'Service %(service_id)s, Region %(region_id)s, ')
            LOG.warning(msg, {'policy_id': policy_id,
                              'endpoint_id': ref['endpoint_id'],
                              'service_id': ref['service_id'],
                              'region_id': ref['region_id']})

        return matching_endpoints

    def get_policy_for_endpoint(self, endpoint_id):
        """Return the policy for an endpoint.

        Search order: an association with the endpoint itself, then with
        the endpoint's region (chasing up the region tree) and service,
        and finally with the service alone.

        :raises keystone.exception.NotFound: if no association matches.
        """

        def _get_policy(policy_id, endpoint_id):
            try:
                return self.policy_api.get_policy(policy_id)
            except exception.PolicyNotFound:
                msg = _LW('Policy %(policy_id)s referenced in association '
                          'for endpoint %(endpoint_id)s not found.')
                LOG.warning(msg, {'policy_id': policy_id,
                                  'endpoint_id': endpoint_id})
                raise

        def _look_for_policy_for_region_and_service(endpoint):
            """Look in the region and its parents for a policy.

            Examine the region of the endpoint for a policy appropriate for
            the service of the endpoint. If there isn't a match, then chase up
            the region tree to find one.

            """
            region_id = endpoint['region_id']
            regions_examined = []
            while region_id is not None:
                try:
                    ref = self.driver.get_policy_association(
                        service_id=endpoint['service_id'],
                        region_id=region_id)
                    return ref['policy_id']
                except exception.PolicyAssociationNotFound:
                    pass

                # There wasn't one for that region & service, let's
                # chase up the region tree
                regions_examined.append(region_id)
                region = self.catalog_api.get_region(region_id)
                region_id = None
                if region.get('parent_region_id') is not None:
                    region_id = region['parent_region_id']
                    if region_id in regions_examined:
                        msg = _LE('Circular reference or a repeated entry '
                                  'found in region tree - %(region_id)s.')
                        LOG.error(msg, {'region_id': region_id})
                        break

        # First let's see if there is a policy explicitly defined for
        # this endpoint.

        try:
            ref = self.driver.get_policy_association(endpoint_id=endpoint_id)
            return _get_policy(ref['policy_id'], endpoint_id)
        except exception.PolicyAssociationNotFound:
            pass

        # There wasn't a policy explicitly defined for this endpoint, so
        # now let's see if there is one for the Region & Service.

        endpoint = self.catalog_api.get_endpoint(endpoint_id)
        policy_id = _look_for_policy_for_region_and_service(endpoint)
        if policy_id is not None:
            return _get_policy(policy_id, endpoint_id)

        # Finally, just check if there is one for the service.
        try:
            ref = self.driver.get_policy_association(
                service_id=endpoint['service_id'])
            return _get_policy(ref['policy_id'], endpoint_id)
        except exception.PolicyAssociationNotFound:
            pass

        msg = _('No policy is associated with endpoint '
                '%(endpoint_id)s.') % {'endpoint_id': endpoint_id}
        raise exception.NotFound(msg)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Driver(object):
+ """Interface description for an Endpoint Policy driver."""
+
+ @abc.abstractmethod
+ def create_policy_association(self, policy_id, endpoint_id=None,
+ service_id=None, region_id=None):
+ """Creates a policy association.
+
+ :param policy_id: identity of policy that is being associated
+ :type policy_id: string
+ :param endpoint_id: identity of endpoint to associate
+ :type endpoint_id: string
+ :param service_id: identity of the service to associate
+ :type service_id: string
+ :param region_id: identity of the region to associate
+ :type region_id: string
+ :returns: None
+
+ There are three types of association permitted:
+
+ - Endpoint (in which case service and region must be None)
+ - Service and region (in which case endpoint must be None)
+ - Service (in which case endpoint and region must be None)
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def check_policy_association(self, policy_id, endpoint_id=None,
+ service_id=None, region_id=None):
+ """Checks existence a policy association.
+
+ :param policy_id: identity of policy that is being associated
+ :type policy_id: string
+ :param endpoint_id: identity of endpoint to associate
+ :type endpoint_id: string
+ :param service_id: identity of the service to associate
+ :type service_id: string
+ :param region_id: identity of the region to associate
+ :type region_id: string
+ :raises: keystone.exception.PolicyAssociationNotFound if there is no
+ match for the specified association
+ :returns: None
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_policy_association(self, policy_id, endpoint_id=None,
+ service_id=None, region_id=None):
+ """Deletes a policy association.
+
+ :param policy_id: identity of policy that is being associated
+ :type policy_id: string
+ :param endpoint_id: identity of endpoint to associate
+ :type endpoint_id: string
+ :param service_id: identity of the service to associate
+ :type service_id: string
+ :param region_id: identity of the region to associate
+ :type region_id: string
+ :returns: None
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def get_policy_association(self, endpoint_id=None,
+ service_id=None, region_id=None):
+ """Gets the policy for an explicit association.
+
+ This method is not exposed as a public API, but is used by
+ get_policy_for_endpoint().
+
+ :param endpoint_id: identity of endpoint
+ :type endpoint_id: string
+ :param service_id: identity of the service
+ :type service_id: string
+ :param region_id: identity of the region
+ :type region_id: string
+ :raises: keystone.exception.PolicyAssociationNotFound if there is no
+ match for the specified association
+ :returns: dict containing policy_id
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def list_associations_for_policy(self, policy_id):
+ """List the associations for a policy.
+
+ This method is not exposed as a public API, but is used by
+ list_endpoints_for_policy().
+
+ :param policy_id: identity of policy
+ :type policy_id: string
+ :returns: List of association dicts
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def list_endpoints_for_policy(self, policy_id):
+ """List all the endpoints using a given policy.
+
+ :param policy_id: identity of policy that is being associated
+ :type policy_id: string
+ :returns: list of endpoints that have an effective association with
+ that policy
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def get_policy_for_endpoint(self, endpoint_id):
+ """Get the appropriate policy for a given endpoint.
+
+ :param endpoint_id: identity of endpoint
+ :type endpoint_id: string
+ :returns: Policy entity for the endpoint
+
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_association_by_endpoint(self, endpoint_id):
+ """Removes all the policy associations with the specific endpoint.
+
+ :param endpoint_id: identity of endpoint to check
+ :type endpoint_id: string
+ :returns: None
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_association_by_service(self, service_id):
+ """Removes all the policy associations with the specific service.
+
+ :param service_id: identity of the service to check
+ :type service_id: string
+ :returns: None
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_association_by_region(self, region_id):
+ """Removes all the policy associations with the specific region.
+
+ :param region_id: identity of the region to check
+ :type region_id: string
+ :returns: None
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_association_by_policy(self, policy_id):
+ """Removes all the policy associations with the specific policy.
+
+ :param policy_id: identity of the policy to check
+ :type policy_id: string
+ :returns: None
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
diff --git a/keystone-moon/keystone/endpoint_policy/routers.py b/keystone-moon/keystone/endpoint_policy/routers.py
new file mode 100644
index 00000000..4846bb18
--- /dev/null
+++ b/keystone-moon/keystone/endpoint_policy/routers.py
@@ -0,0 +1,85 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+
+from keystone.common import json_home
+from keystone.common import wsgi
+from keystone.endpoint_policy import controllers
+
+
+build_resource_relation = functools.partial(
+ json_home.build_v3_extension_resource_relation,
+ extension_name='OS-ENDPOINT-POLICY', extension_version='1.0')
+
+
+class Routers(wsgi.RoutersBase):
+
+ PATH_PREFIX = '/OS-ENDPOINT-POLICY'
+
+ def append_v3_routers(self, mapper, routers):
+ endpoint_policy_controller = controllers.EndpointPolicyV3Controller()
+
+ self._add_resource(
+ mapper, endpoint_policy_controller,
+ path='/endpoints/{endpoint_id}' + self.PATH_PREFIX + '/policy',
+ get_head_action='get_policy_for_endpoint',
+ rel=build_resource_relation(resource_name='endpoint_policy'),
+ path_vars={'endpoint_id': json_home.Parameters.ENDPOINT_ID})
+ self._add_resource(
+ mapper, endpoint_policy_controller,
+ path='/policies/{policy_id}' + self.PATH_PREFIX + '/endpoints',
+ get_action='list_endpoints_for_policy',
+ rel=build_resource_relation(resource_name='policy_endpoints'),
+ path_vars={'policy_id': json_home.Parameters.POLICY_ID})
+ self._add_resource(
+ mapper, endpoint_policy_controller,
+ path=('/policies/{policy_id}' + self.PATH_PREFIX +
+ '/endpoints/{endpoint_id}'),
+ get_head_action='check_policy_association_for_endpoint',
+ put_action='create_policy_association_for_endpoint',
+ delete_action='delete_policy_association_for_endpoint',
+ rel=build_resource_relation(
+ resource_name='endpoint_policy_association'),
+ path_vars={
+ 'policy_id': json_home.Parameters.POLICY_ID,
+ 'endpoint_id': json_home.Parameters.ENDPOINT_ID,
+ })
+ self._add_resource(
+ mapper, endpoint_policy_controller,
+ path=('/policies/{policy_id}' + self.PATH_PREFIX +
+ '/services/{service_id}'),
+ get_head_action='check_policy_association_for_service',
+ put_action='create_policy_association_for_service',
+ delete_action='delete_policy_association_for_service',
+ rel=build_resource_relation(
+ resource_name='service_policy_association'),
+ path_vars={
+ 'policy_id': json_home.Parameters.POLICY_ID,
+ 'service_id': json_home.Parameters.SERVICE_ID,
+ })
+ self._add_resource(
+ mapper, endpoint_policy_controller,
+ path=('/policies/{policy_id}' + self.PATH_PREFIX +
+ '/services/{service_id}/regions/{region_id}'),
+ get_head_action='check_policy_association_for_region_and_service',
+ put_action='create_policy_association_for_region_and_service',
+ delete_action='delete_policy_association_for_region_and_service',
+ rel=build_resource_relation(
+ resource_name='region_and_service_policy_association'),
+ path_vars={
+ 'policy_id': json_home.Parameters.POLICY_ID,
+ 'service_id': json_home.Parameters.SERVICE_ID,
+ 'region_id': json_home.Parameters.REGION_ID,
+ })
diff --git a/keystone-moon/keystone/exception.py b/keystone-moon/keystone/exception.py
index 6749fdcd..8e573c4c 100644
--- a/keystone-moon/keystone/exception.py
+++ b/keystone-moon/keystone/exception.py
@@ -15,7 +15,6 @@
from oslo_config import cfg
from oslo_log import log
from oslo_utils import encodeutils
-import six
from keystone.i18n import _, _LW
@@ -63,7 +62,7 @@ class Error(Exception):
except UnicodeDecodeError:
try:
kwargs = {k: encodeutils.safe_decode(v)
- for k, v in six.iteritems(kwargs)}
+ for k, v in kwargs.items()}
except UnicodeDecodeError:
# NOTE(jamielennox): This is the complete failure case
# at least by showing the template we have some idea
@@ -84,6 +83,11 @@ class ValidationError(Error):
title = 'Bad Request'
+class URLValidationError(ValidationError):
+ message_format = _("Cannot create an endpoint with an invalid URL:"
+ " %(url)s")
+
+
class SchemaValidationError(ValidationError):
# NOTE(lbragstad): For whole OpenStack message consistency, this error
# message has been written in a format consistent with WSME.
@@ -99,6 +103,15 @@ class ValidationTimeStampError(Error):
title = 'Bad Request'
+class ValidationExpirationError(Error):
+ message_format = _("The 'expires_at' must not be before now."
+ " The server could not comply with the request"
+ " since it is either malformed or otherwise"
+ " incorrect. The client is assumed to be in error.")
+ code = 400
+ title = 'Bad Request'
+
+
class StringLengthExceeded(ValidationError):
message_format = _("String length exceeded.The length of"
" string '%(string)s' exceeded the limit"
@@ -448,9 +461,9 @@ class MigrationNotProvided(Exception):
) % {'mod_name': mod_name, 'path': path})
-class UnsupportedTokenVersionException(Exception):
- """Token version is unrecognizable or unsupported."""
- pass
+class UnsupportedTokenVersionException(UnexpectedError):
+ message_format = _('Token version is unrecognizable or '
+ 'unsupported.')
class SAMLSigningError(UnexpectedError):
diff --git a/keystone-moon/keystone/identity/backends/ldap.py b/keystone-moon/keystone/identity/backends/ldap.py
index 0f7ee450..7a3cb03b 100644
--- a/keystone-moon/keystone/identity/backends/ldap.py
+++ b/keystone-moon/keystone/identity/backends/ldap.py
@@ -14,13 +14,12 @@
from __future__ import absolute_import
import uuid
-import ldap
import ldap.filter
from oslo_config import cfg
from oslo_log import log
import six
-from keystone import clean
+from keystone.common import clean
from keystone.common import driver_hints
from keystone.common import ldap as common_ldap
from keystone.common import models
@@ -42,7 +41,7 @@ class Identity(identity.Driver):
self.group = GroupApi(conf)
def default_assignment_driver(self):
- return "keystone.assignment.backends.ldap.Assignment"
+ return 'ldap'
def is_domain_aware(self):
return False
@@ -352,20 +351,18 @@ class GroupApi(common_ldap.BaseLdap):
"""Return a list of groups for which the user is a member."""
user_dn_esc = ldap.filter.escape_filter_chars(user_dn)
- query = '(&(objectClass=%s)(%s=%s)%s)' % (self.object_class,
- self.member_attribute,
- user_dn_esc,
- self.ldap_filter or '')
+ query = '(%s=%s)%s' % (self.member_attribute,
+ user_dn_esc,
+ self.ldap_filter or '')
return self.get_all(query)
def list_user_groups_filtered(self, user_dn, hints):
"""Return a filtered list of groups for which the user is a member."""
user_dn_esc = ldap.filter.escape_filter_chars(user_dn)
- query = '(&(objectClass=%s)(%s=%s)%s)' % (self.object_class,
- self.member_attribute,
- user_dn_esc,
- self.ldap_filter or '')
+ query = '(%s=%s)%s' % (self.member_attribute,
+ user_dn_esc,
+ self.ldap_filter or '')
return self.get_all_filtered(hints, query)
def list_group_users(self, group_id):
diff --git a/keystone-moon/keystone/identity/backends/sql.py b/keystone-moon/keystone/identity/backends/sql.py
index 39868416..8bda9a1b 100644
--- a/keystone-moon/keystone/identity/backends/sql.py
+++ b/keystone-moon/keystone/identity/backends/sql.py
@@ -77,7 +77,7 @@ class Identity(identity.Driver):
super(Identity, self).__init__()
def default_assignment_driver(self):
- return "keystone.assignment.backends.sql.Assignment"
+ return 'sql'
@property
def is_sql(self):
@@ -211,28 +211,19 @@ class Identity(identity.Driver):
session.delete(membership_ref)
def list_groups_for_user(self, user_id, hints):
- # TODO(henry-nash) We could implement full filtering here by enhancing
- # the join below. However, since it is likely to be a fairly rare
- # occurrence to filter on more than the user_id already being used
- # here, this is left as future enhancement and until then we leave
- # it for the controller to do for us.
session = sql.get_session()
self.get_user(user_id)
query = session.query(Group).join(UserGroupMembership)
query = query.filter(UserGroupMembership.user_id == user_id)
+ query = sql.filter_limit_query(Group, query, hints)
return [g.to_dict() for g in query]
def list_users_in_group(self, group_id, hints):
- # TODO(henry-nash) We could implement full filtering here by enhancing
- # the join below. However, since it is likely to be a fairly rare
- # occurrence to filter on more than the group_id already being used
- # here, this is left as future enhancement and until then we leave
- # it for the controller to do for us.
session = sql.get_session()
self.get_group(group_id)
query = session.query(User).join(UserGroupMembership)
query = query.filter(UserGroupMembership.group_id == group_id)
-
+ query = sql.filter_limit_query(User, query, hints)
return [identity.filter_user(u.to_dict()) for u in query]
def delete_user(self, user_id):
diff --git a/keystone-moon/keystone/identity/controllers.py b/keystone-moon/keystone/identity/controllers.py
index a2676c41..7a6a642a 100644
--- a/keystone-moon/keystone/identity/controllers.py
+++ b/keystone-moon/keystone/identity/controllers.py
@@ -19,8 +19,10 @@ from oslo_log import log
from keystone.common import controller
from keystone.common import dependency
+from keystone.common import validation
from keystone import exception
from keystone.i18n import _, _LW
+from keystone.identity import schema
from keystone import notifications
@@ -205,9 +207,8 @@ class UserV3(controller.V3Controller):
self.check_protection(context, prep_info, ref)
@controller.protected()
+ @validation.validated(schema.user_create, 'user')
def create_user(self, context, user):
- self._require_attribute(user, 'name')
-
# The manager layer will generate the unique ID for users
ref = self._normalize_dict(user)
ref = self._normalize_domain_id(context, ref)
@@ -243,6 +244,7 @@ class UserV3(controller.V3Controller):
return UserV3.wrap_member(context, ref)
@controller.protected()
+ @validation.validated(schema.user_update, 'user')
def update_user(self, context, user_id, user):
return self._update_user(context, user_id, user)
@@ -291,9 +293,8 @@ class GroupV3(controller.V3Controller):
self.get_member_from_driver = self.identity_api.get_group
@controller.protected()
+ @validation.validated(schema.group_create, 'group')
def create_group(self, context, group):
- self._require_attribute(group, 'name')
-
# The manager layer will generate the unique ID for groups
ref = self._normalize_dict(group)
ref = self._normalize_domain_id(context, ref)
@@ -321,6 +322,7 @@ class GroupV3(controller.V3Controller):
return GroupV3.wrap_member(context, ref)
@controller.protected()
+ @validation.validated(schema.group_update, 'group')
def update_group(self, context, group_id, group):
self._require_matching_id(group_id, group)
self._require_matching_domain_id(
diff --git a/keystone-moon/keystone/identity/core.py b/keystone-moon/keystone/identity/core.py
index 988df78b..612a1859 100644
--- a/keystone-moon/keystone/identity/core.py
+++ b/keystone-moon/keystone/identity/core.py
@@ -21,11 +21,10 @@ import uuid
from oslo_config import cfg
from oslo_log import log
-from oslo_utils import importutils
import six
-from keystone import clean
from keystone.common import cache
+from keystone.common import clean
from keystone.common import dependency
from keystone.common import driver_hints
from keystone.common import manager
@@ -90,8 +89,9 @@ class DomainConfigs(dict):
_any_sql = False
def _load_driver(self, domain_config):
- return importutils.import_object(
- domain_config['cfg'].identity.driver, domain_config['cfg'])
+ return manager.load_driver(Manager.driver_namespace,
+ domain_config['cfg'].identity.driver,
+ domain_config['cfg'])
def _assert_no_more_than_one_sql_driver(self, domain_id, new_config,
config_file=None):
@@ -111,7 +111,7 @@ class DomainConfigs(dict):
if not config_file:
config_file = _('Database at /domains/%s/config') % domain_id
raise exception.MultipleSQLDriversInConfig(source=config_file)
- self._any_sql = new_config['driver'].is_sql
+ self._any_sql = self._any_sql or new_config['driver'].is_sql
def _load_config_from_file(self, resource_api, file_list, domain_name):
@@ -176,6 +176,21 @@ class DomainConfigs(dict):
fname)
def _load_config_from_database(self, domain_id, specific_config):
+
+ def _assert_not_sql_driver(domain_id, new_config):
+ """Ensure this is not an sql driver.
+
+ Due to multi-threading safety concerns, we do not currently support
+ the setting of a specific identity driver to sql via the Identity
+ API.
+
+ """
+ if new_config['driver'].is_sql:
+ reason = _('Domain specific sql drivers are not supported via '
+ 'the Identity API. One is specified in '
+ '/domains/%s/config') % domain_id
+ raise exception.InvalidDomainConfig(reason=reason)
+
domain_config = {}
domain_config['cfg'] = cfg.ConfigOpts()
config.configure(conf=domain_config['cfg'])
@@ -186,10 +201,12 @@ class DomainConfigs(dict):
for group in specific_config:
for option in specific_config[group]:
domain_config['cfg'].set_override(
- option, specific_config[group][option], group)
+ option, specific_config[group][option],
+ group, enforce_type=True)
+ domain_config['cfg_overrides'] = specific_config
domain_config['driver'] = self._load_driver(domain_config)
- self._assert_no_more_than_one_sql_driver(domain_id, domain_config)
+ _assert_not_sql_driver(domain_id, domain_config)
self[domain_id] = domain_config
def _setup_domain_drivers_from_database(self, standard_driver,
@@ -226,10 +243,12 @@ class DomainConfigs(dict):
resource_api)
def get_domain_driver(self, domain_id):
+ self.check_config_and_reload_domain_driver_if_required(domain_id)
if domain_id in self:
return self[domain_id]['driver']
def get_domain_conf(self, domain_id):
+ self.check_config_and_reload_domain_driver_if_required(domain_id)
if domain_id in self:
return self[domain_id]['cfg']
else:
@@ -249,6 +268,61 @@ class DomainConfigs(dict):
# The standard driver
self.driver = self.driver()
+ def check_config_and_reload_domain_driver_if_required(self, domain_id):
+ """Check for, and load, any new domain specific config for this domain.
+
+ This is only supported for the database-stored domain specific
+ configuration.
+
+ When the domain specific drivers were set up, we stored away the
+ specific config for this domain that was available at that time. So we
+ now read the current version and compare. While this might seem
+ somewhat inefficient, the sensitive config call is cached, so should be
+ light weight. More importantly, when the cache timeout is reached, we
+ will get any config that has been updated from any other keystone
+ process.
+
+ This cache-timeout approach works for both multi-process and
+ multi-threaded keystone configurations. In multi-threaded
+ configurations, even though we might remove a driver object (that
+ could be in use by another thread), this won't actually be thrown away
+ until all references to it have been broken. When that other
+ thread is released back and is restarted with another command to
+ process, next time it accesses the driver it will pickup the new one.
+
+ """
+ if (not CONF.identity.domain_specific_drivers_enabled or
+ not CONF.identity.domain_configurations_from_database):
+ # If specific drivers are not enabled, then there is nothing to do.
+ # If we are not storing the configurations in the database, then
+ # we'll only re-read the domain specific config files on startup
+ # of keystone.
+ return
+
+ latest_domain_config = (
+ self.domain_config_api.
+ get_config_with_sensitive_info(domain_id))
+ domain_config_in_use = domain_id in self
+
+ if latest_domain_config:
+ if (not domain_config_in_use or
+ latest_domain_config != self[domain_id]['cfg_overrides']):
+ self._load_config_from_database(domain_id,
+ latest_domain_config)
+ elif domain_config_in_use:
+ # The domain specific config has been deleted, so should remove the
+ # specific driver for this domain.
+ try:
+ del self[domain_id]
+ except KeyError:
+ # Allow this error in case we are unlucky and in a
+ # multi-threaded situation, two threads happen to be running
+ # in lock step.
+ pass
+ # If we fall into the else condition, this means there is no domain
+ # config set, and there is none in use either, so we have nothing
+ # to do.
+
def domains_configured(f):
"""Wraps API calls to lazy load domain configs after init.
@@ -291,6 +365,7 @@ def exception_translated(exception_type):
return _exception_translated
+@notifications.listener
@dependency.provider('identity_api')
@dependency.requires('assignment_api', 'credential_api', 'id_mapping_api',
'resource_api', 'revoke_api')
@@ -332,6 +407,9 @@ class Manager(manager.Manager):
mapping by default is a more prudent way to introduce this functionality.
"""
+
+ driver_namespace = 'keystone.identity'
+
_USER = 'user'
_GROUP = 'group'
@@ -521,10 +599,10 @@ class Manager(manager.Manager):
if (not driver.is_domain_aware() and driver == self.driver and
domain_id != CONF.identity.default_domain_id and
domain_id is not None):
- LOG.warning('Found multiple domains being mapped to a '
- 'driver that does not support that (e.g. '
- 'LDAP) - Domain ID: %(domain)s, '
- 'Default Driver: %(driver)s',
+ LOG.warning(_LW('Found multiple domains being mapped to a '
+ 'driver that does not support that (e.g. '
+ 'LDAP) - Domain ID: %(domain)s, '
+ 'Default Driver: %(driver)s'),
{'domain': domain_id,
'driver': (driver == self.driver)})
raise exception.DomainNotFound(domain_id=domain_id)
@@ -765,7 +843,7 @@ class Manager(manager.Manager):
# Get user details to invalidate the cache.
user_old = self.get_user(user_id)
driver.delete_user(entity_id)
- self.assignment_api.delete_user(user_id)
+ self.assignment_api.delete_user_assignments(user_id)
self.get_user.invalidate(self, user_id)
self.get_user_by_name.invalidate(self, user_old['name'],
user_old['domain_id'])
@@ -837,7 +915,7 @@ class Manager(manager.Manager):
driver.delete_group(entity_id)
self.get_group.invalidate(self, group_id)
self.id_mapping_api.delete_id_mapping(group_id)
- self.assignment_api.delete_group(group_id)
+ self.assignment_api.delete_group_assignments(group_id)
notifications.Audit.deleted(self._GROUP, group_id, initiator)
@@ -895,6 +973,19 @@ class Manager(manager.Manager):
"""
pass
+ @notifications.internal(
+ notifications.INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE)
+ def emit_invalidate_grant_token_persistence(self, user_project):
+ """Emit a notification to the callback system to revoke grant tokens.
+
+ This method and associated callback listener removes the need for
+ making a direct call to another manager to delete and revoke tokens.
+
+ :param user_project: {'user_id': user_id, 'project_id': project_id}
+ :type user_project: dict
+ """
+ pass
+
@manager.response_truncated
@domains_configured
@exception_translated('user')
@@ -1193,6 +1284,8 @@ class Driver(object):
class MappingManager(manager.Manager):
"""Default pivot point for the ID Mapping backend."""
+ driver_namespace = 'keystone.identity.id_mapping'
+
def __init__(self):
super(MappingManager, self).__init__(CONF.identity_mapping.driver)
diff --git a/keystone-moon/keystone/identity/generator.py b/keystone-moon/keystone/identity/generator.py
index d25426ce..05ad2df5 100644
--- a/keystone-moon/keystone/identity/generator.py
+++ b/keystone-moon/keystone/identity/generator.py
@@ -23,6 +23,7 @@ from keystone.common import dependency
from keystone.common import manager
from keystone import exception
+
CONF = cfg.CONF
@@ -30,6 +31,8 @@ CONF = cfg.CONF
class Manager(manager.Manager):
"""Default pivot point for the identifier generator backend."""
+ driver_namespace = 'keystone.identity.id_generator'
+
def __init__(self):
super(Manager, self).__init__(CONF.identity_mapping.generator)
diff --git a/keystone-moon/keystone/identity/schema.py b/keystone-moon/keystone/identity/schema.py
new file mode 100644
index 00000000..047fcf02
--- /dev/null
+++ b/keystone-moon/keystone/identity/schema.py
@@ -0,0 +1,67 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import validation
+from keystone.common.validation import parameter_types
+
+
+# NOTE(lhcheng): the max length is not applicable since it is specific
+# to the SQL backend, LDAP does not have length limitation.
+_identity_name = {
+ 'type': 'string',
+ 'minLength': 1
+}
+
+_user_properties = {
+ 'default_project_id': validation.nullable(parameter_types.id_string),
+ 'description': validation.nullable(parameter_types.description),
+ 'domain_id': parameter_types.id_string,
+ 'enabled': parameter_types.boolean,
+ 'name': _identity_name,
+ 'password': {
+ 'type': ['string', 'null']
+ }
+}
+
+user_create = {
+ 'type': 'object',
+ 'properties': _user_properties,
+ 'required': ['name'],
+ 'additionalProperties': True
+}
+
+user_update = {
+ 'type': 'object',
+ 'properties': _user_properties,
+ 'minProperties': 1,
+ 'additionalProperties': True
+}
+
+_group_properties = {
+ 'description': validation.nullable(parameter_types.description),
+ 'domain_id': parameter_types.id_string,
+ 'name': _identity_name
+}
+
+group_create = {
+ 'type': 'object',
+ 'properties': _group_properties,
+ 'required': ['name'],
+ 'additionalProperties': True
+}
+
+group_update = {
+ 'type': 'object',
+ 'properties': _group_properties,
+ 'minProperties': 1,
+ 'additionalProperties': True
+}
diff --git a/keystone-moon/keystone/locale/de/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/de/LC_MESSAGES/keystone-log-critical.po
index 8e4b6773..0403952d 100644
--- a/keystone-moon/keystone/locale/de/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/de/LC_MESSAGES/keystone-log-critical.po
@@ -1,5 +1,5 @@
# Translations template for keystone.
-# Copyright (C) 2014 OpenStack Foundation
+# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
# Translators:
@@ -7,19 +7,18 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
"PO-Revision-Date: 2014-08-31 15:19+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: German (http://www.transifex.com/projects/p/keystone/language/"
+"Language-Team: German (http://www.transifex.com/openstack/keystone/language/"
"de/)\n"
"Language: de\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
-#: keystone/catalog/backends/templated.py:106
#, python-format
msgid "Unable to open template file %s"
msgstr "Vorlagendatei %s kann nicht geöffnet werden"
diff --git a/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-critical.po
index d2f5ebe6..289fa43d 100644
--- a/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-critical.po
@@ -1,5 +1,5 @@
# Translations template for keystone.
-# Copyright (C) 2014 OpenStack Foundation
+# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
# Translators:
@@ -7,19 +7,18 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
"PO-Revision-Date: 2014-08-31 15:19+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: English (Australia) (http://www.transifex.com/projects/p/"
+"Language-Team: English (Australia) (http://www.transifex.com/openstack/"
"keystone/language/en_AU/)\n"
"Language: en_AU\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
-#: keystone/catalog/backends/templated.py:106
#, python-format
msgid "Unable to open template file %s"
msgstr "Unable to open template file %s"
diff --git a/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-error.po b/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-error.po
index 977af694..65b59aa3 100644
--- a/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-error.po
+++ b/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-error.po
@@ -7,77 +7,47 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-03-09 06:03+0000\n"
-"PO-Revision-Date: 2015-03-07 04:31+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
+"PO-Revision-Date: 2015-06-26 17:13+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: English (Australia) (http://www.transifex.com/projects/p/"
+"Language-Team: English (Australia) (http://www.transifex.com/openstack/"
"keystone/language/en_AU/)\n"
"Language: en_AU\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
-#: keystone/notifications.py:304
-msgid "Failed to construct notifier"
+msgid ""
+"Error setting up the debug environment. Verify that the option --debug-url "
+"has the format <host>:<port> and that a debugger processes is listening on "
+"that port."
msgstr ""
+"Error setting up the debug environment. Verify that the option --debug-url "
+"has the format <host>:<port> and that a debugger processes is listening on "
+"that port."
-#: keystone/notifications.py:389
#, python-format
msgid "Failed to send %(res_id)s %(event_type)s notification"
msgstr "Failed to send %(res_id)s %(event_type)s notification"
-#: keystone/notifications.py:606
-#, python-format
-msgid "Failed to send %(action)s %(event_type)s notification"
-msgstr ""
-
-#: keystone/catalog/core.py:62
-#, python-format
-msgid "Malformed endpoint - %(url)r is not a string"
-msgstr ""
+msgid "Failed to validate token"
+msgstr "Failed to validate token"
-#: keystone/catalog/core.py:66
#, python-format
msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s"
msgstr "Malformed endpoint %(url)s - unknown key %(keyerror)s"
-#: keystone/catalog/core.py:71
-#, python-format
-msgid ""
-"Malformed endpoint '%(url)s'. The following type error occurred during "
-"string substitution: %(typeerror)s"
-msgstr ""
-
-#: keystone/catalog/core.py:77
#, python-format
msgid ""
"Malformed endpoint %s - incomplete format (are you missing a type notifier ?)"
msgstr ""
"Malformed endpoint %s - incomplete format (are you missing a type notifier ?)"
-#: keystone/common/openssl.py:93
-#, python-format
-msgid "Command %(to_exec)s exited with %(retcode)s- %(output)s"
-msgstr ""
-
-#: keystone/common/openssl.py:121
-#, python-format
-msgid "Failed to remove file %(file_path)r: %(error)s"
-msgstr ""
-
-#: keystone/common/utils.py:239
-msgid ""
-"Error setting up the debug environment. Verify that the option --debug-url "
-"has the format <host>:<port> and that a debugger processes is listening on "
-"that port."
-msgstr ""
-"Error setting up the debug environment. Verify that the option --debug-url "
-"has the format <host>:<port> and that a debugger processes is listening on "
-"that port."
+msgid "Server error"
+msgstr "Server error"
-#: keystone/common/cache/core.py:100
#, python-format
msgid ""
"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
@@ -86,94 +56,9 @@ msgstr ""
"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
"Skipping unknown format: %s"
-#: keystone/common/environment/eventlet_server.py:99
-#, python-format
-msgid "Could not bind to %(host)s:%(port)s"
-msgstr ""
-
-#: keystone/common/environment/eventlet_server.py:185
-msgid "Server error"
-msgstr "Server error"
-
-#: keystone/contrib/endpoint_policy/core.py:129
-#: keystone/contrib/endpoint_policy/core.py:228
-#, python-format
-msgid ""
-"Circular reference or a repeated entry found in region tree - %(region_id)s."
-msgstr ""
-
-#: keystone/contrib/federation/idp.py:410
-#, python-format
-msgid "Error when signing assertion, reason: %(reason)s"
-msgstr ""
-
-#: keystone/contrib/oauth1/core.py:136
-msgid "Cannot retrieve Authorization headers"
-msgstr ""
-
-#: keystone/openstack/common/loopingcall.py:95
-msgid "in fixed duration looping call"
-msgstr "in fixed duration looping call"
-
-#: keystone/openstack/common/loopingcall.py:138
-msgid "in dynamic looping call"
-msgstr "in dynamic looping call"
-
-#: keystone/openstack/common/service.py:268
-msgid "Unhandled exception"
-msgstr "Unhandled exception"
-
-#: keystone/resource/core.py:477
-#, python-format
-msgid ""
-"Circular reference or a repeated entry found projects hierarchy - "
-"%(project_id)s."
-msgstr ""
-
-#: keystone/resource/core.py:939
-#, python-format
-msgid ""
-"Unexpected results in response for domain config - %(count)s responses, "
-"first option is %(option)s, expected option %(expected)s"
-msgstr ""
-
-#: keystone/resource/backends/sql.py:102 keystone/resource/backends/sql.py:121
-#, python-format
-msgid ""
-"Circular reference or a repeated entry found in projects hierarchy - "
-"%(project_id)s."
-msgstr ""
-
-#: keystone/token/provider.py:292
-#, python-format
-msgid "Unexpected error or malformed token determining token expiry: %s"
-msgstr "Unexpected error or malformed token determining token expiry: %s"
-
-#: keystone/token/persistence/backends/kvs.py:226
-#, python-format
-msgid ""
-"Reinitializing revocation list due to error in loading revocation list from "
-"backend. Expected `list` type got `%(type)s`. Old revocation list data: "
-"%(list)r"
-msgstr ""
-
-#: keystone/token/providers/common.py:611
-msgid "Failed to validate token"
-msgstr "Failed to validate token"
-
-#: keystone/token/providers/pki.py:47
msgid "Unable to sign token"
msgstr "Unable to sign token"
-#: keystone/token/providers/fernet/utils.py:38
#, python-format
-msgid ""
-"Either [fernet_tokens] key_repository does not exist or Keystone does not "
-"have sufficient permission to access it: %s"
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:79
-msgid ""
-"Failed to create [fernet_tokens] key_repository: either it already exists or "
-"you don't have sufficient permissions to create it"
-msgstr ""
+msgid "Unexpected error or malformed token determining token expiry: %s"
+msgstr "Unexpected error or malformed token determining token expiry: %s"
diff --git a/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone.po b/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone.po
index e3dea47d..dca5aa9b 100644
--- a/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone.po
+++ b/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone.po
@@ -8,1535 +8,340 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-03-23 06:04+0000\n"
-"PO-Revision-Date: 2015-03-21 23:03+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
+"PO-Revision-Date: 2015-08-04 18:01+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: English (Australia) "
-"(http://www.transifex.com/projects/p/keystone/language/en_AU/)\n"
+"Language-Team: English (Australia) (http://www.transifex.com/openstack/"
+"keystone/language/en_AU/)\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
-#: keystone/clean.py:24
-#, python-format
-msgid "%s cannot be empty."
-msgstr "%s cannot be empty."
-
-#: keystone/clean.py:26
#, python-format
msgid "%(property_name)s cannot be less than %(min_length)s characters."
msgstr "%(property_name)s cannot be less than %(min_length)s characters."
-#: keystone/clean.py:31
-#, python-format
-msgid "%(property_name)s should not be greater than %(max_length)s characters."
-msgstr "%(property_name)s should not be greater than %(max_length)s characters."
-
-#: keystone/clean.py:40
#, python-format
msgid "%(property_name)s is not a %(display_expected_type)s"
msgstr "%(property_name)s is not a %(display_expected_type)s"
-#: keystone/cli.py:283
-msgid "At least one option must be provided"
-msgstr ""
-
-#: keystone/cli.py:290
-msgid "--all option cannot be mixed with other options"
-msgstr ""
-
-#: keystone/cli.py:301
-#, python-format
-msgid "Unknown domain '%(name)s' specified by --domain-name"
-msgstr ""
-
-#: keystone/cli.py:365 keystone/tests/unit/test_cli.py:213
-msgid "At least one option must be provided, use either --all or --domain-name"
-msgstr ""
-
-#: keystone/cli.py:371 keystone/tests/unit/test_cli.py:229
-msgid "The --all option cannot be used with the --domain-name option"
-msgstr ""
-
-#: keystone/cli.py:397 keystone/tests/unit/test_cli.py:246
-#, python-format
-msgid ""
-"Invalid domain name: %(domain)s found in config file name: %(file)s - "
-"ignoring this file."
-msgstr ""
-
-#: keystone/cli.py:405 keystone/tests/unit/test_cli.py:187
-#, python-format
-msgid ""
-"Domain: %(domain)s already has a configuration defined - ignoring file: "
-"%(file)s."
-msgstr ""
-
-#: keystone/cli.py:419
-#, python-format
-msgid "Error parsing configuration file for domain: %(domain)s, file: %(file)s."
-msgstr ""
-
-#: keystone/cli.py:452
-#, python-format
-msgid ""
-"To get a more detailed information on this error, re-run this command for"
-" the specific domain, i.e.: keystone-manage domain_config_upload "
-"--domain-name %s"
-msgstr ""
-
-#: keystone/cli.py:470
-#, python-format
-msgid "Unable to locate domain config directory: %s"
-msgstr "Unable to locate domain config directory: %s"
-
-#: keystone/cli.py:503
-msgid ""
-"Unable to access the keystone database, please check it is configured "
-"correctly."
-msgstr ""
-
-#: keystone/exception.py:79
-#, python-format
-msgid ""
-"Expecting to find %(attribute)s in %(target)s - the server could not "
-"comply with the request since it is either malformed or otherwise "
-"incorrect. The client is assumed to be in error."
-msgstr ""
-
-#: keystone/exception.py:90
-#, python-format
-msgid "%(detail)s"
-msgstr ""
-
-#: keystone/exception.py:94
-msgid ""
-"Timestamp not in expected format. The server could not comply with the "
-"request since it is either malformed or otherwise incorrect. The client "
-"is assumed to be in error."
-msgstr ""
-"Timestamp not in expected format. The server could not comply with the "
-"request since it is either malformed or otherwise incorrect. The client "
-"is assumed to be in error."
-
-#: keystone/exception.py:103
-#, python-format
-msgid ""
-"String length exceeded.The length of string '%(string)s' exceeded the "
-"limit of column %(type)s(CHAR(%(length)d))."
-msgstr ""
-"String length exceeded.The length of string '%(string)s' exceeded the "
-"limit of column %(type)s(CHAR(%(length)d))."
-
-#: keystone/exception.py:109
#, python-format
-msgid ""
-"Request attribute %(attribute)s must be less than or equal to %(size)i. "
-"The server could not comply with the request because the attribute size "
-"is invalid (too large). The client is assumed to be in error."
-msgstr ""
-"Request attribute %(attribute)s must be less than or equal to %(size)i. "
-"The server could not comply with the request because the attribute size "
-"is invalid (too large). The client is assumed to be in error."
-
-#: keystone/exception.py:119
-#, python-format
-msgid ""
-"The specified parent region %(parent_region_id)s would create a circular "
-"region hierarchy."
-msgstr ""
-
-#: keystone/exception.py:126
-#, python-format
-msgid ""
-"The password length must be less than or equal to %(size)i. The server "
-"could not comply with the request because the password is invalid."
-msgstr ""
-
-#: keystone/exception.py:134
-#, python-format
-msgid ""
-"Unable to delete region %(region_id)s because it or its child regions "
-"have associated endpoints."
-msgstr ""
-
-#: keystone/exception.py:141
-msgid ""
-"The certificates you requested are not available. It is likely that this "
-"server does not use PKI tokens otherwise this is the result of "
-"misconfiguration."
-msgstr ""
-
-#: keystone/exception.py:150
-msgid "(Disable debug mode to suppress these details.)"
+msgid "%(property_name)s should not be greater than %(max_length)s characters."
msgstr ""
+"%(property_name)s should not be greater than %(max_length)s characters."
-#: keystone/exception.py:155
#, python-format
-msgid "%(message)s %(amendment)s"
-msgstr ""
-
-#: keystone/exception.py:163
-msgid "The request you have made requires authentication."
-msgstr "The request you have made requires authentication."
-
-#: keystone/exception.py:169
-msgid "Authentication plugin error."
-msgstr "Authentication plugin error."
+msgid "%s cannot be empty."
+msgstr "%s cannot be empty."
-#: keystone/exception.py:177
-#, python-format
-msgid "Unable to find valid groups while using mapping %(mapping_id)s"
-msgstr ""
+msgid "Access token is expired"
+msgstr "Access token is expired"
-#: keystone/exception.py:182
-msgid "Attempted to authenticate with an unsupported method."
-msgstr "Attempted to authenticate with an unsupported method."
+msgid "Access token not found"
+msgstr "Access token not found"
-#: keystone/exception.py:190
msgid "Additional authentications steps required."
msgstr "Additional authentications steps required."
-#: keystone/exception.py:198
-msgid "You are not authorized to perform the requested action."
-msgstr "You are not authorized to perform the requested action."
-
-#: keystone/exception.py:205
-#, python-format
-msgid "You are not authorized to perform the requested action: %(action)s"
-msgstr ""
-
-#: keystone/exception.py:210
-#, python-format
-msgid ""
-"Could not change immutable attribute(s) '%(attributes)s' in target "
-"%(target)s"
-msgstr ""
-
-#: keystone/exception.py:215
-#, python-format
-msgid ""
-"Group membership across backend boundaries is not allowed, group in "
-"question is %(group_id)s, user is %(user_id)s"
-msgstr ""
-
-#: keystone/exception.py:221
-#, python-format
-msgid ""
-"Invalid mix of entities for policy association - only Endpoint, Service "
-"or Region+Service allowed. Request was - Endpoint: %(endpoint_id)s, "
-"Service: %(service_id)s, Region: %(region_id)s"
-msgstr ""
-
-#: keystone/exception.py:228
-#, python-format
-msgid "Invalid domain specific configuration: %(reason)s"
-msgstr ""
-
-#: keystone/exception.py:232
-#, python-format
-msgid "Could not find: %(target)s"
-msgstr ""
-
-#: keystone/exception.py:238
-#, python-format
-msgid "Could not find endpoint: %(endpoint_id)s"
-msgstr ""
-
-#: keystone/exception.py:245
msgid "An unhandled exception has occurred: Could not find metadata."
msgstr "An unhandled exception has occurred: Could not find metadata."
-#: keystone/exception.py:250
-#, python-format
-msgid "Could not find policy: %(policy_id)s"
-msgstr ""
-
-#: keystone/exception.py:254
-msgid "Could not find policy association"
-msgstr ""
-
-#: keystone/exception.py:258
-#, python-format
-msgid "Could not find role: %(role_id)s"
-msgstr ""
-
-#: keystone/exception.py:262
-#, python-format
-msgid ""
-"Could not find role assignment with role: %(role_id)s, user or group: "
-"%(actor_id)s, project or domain: %(target_id)s"
-msgstr ""
-
-#: keystone/exception.py:268
-#, python-format
-msgid "Could not find region: %(region_id)s"
-msgstr ""
-
-#: keystone/exception.py:272
-#, python-format
-msgid "Could not find service: %(service_id)s"
-msgstr ""
-
-#: keystone/exception.py:276
-#, python-format
-msgid "Could not find domain: %(domain_id)s"
-msgstr ""
-
-#: keystone/exception.py:280
-#, python-format
-msgid "Could not find project: %(project_id)s"
-msgstr ""
-
-#: keystone/exception.py:284
-#, python-format
-msgid "Cannot create project with parent: %(project_id)s"
-msgstr ""
-
-#: keystone/exception.py:288
-#, python-format
-msgid "Could not find token: %(token_id)s"
-msgstr ""
-
-#: keystone/exception.py:292
-#, python-format
-msgid "Could not find user: %(user_id)s"
-msgstr ""
-
-#: keystone/exception.py:296
-#, python-format
-msgid "Could not find group: %(group_id)s"
-msgstr ""
-
-#: keystone/exception.py:300
-#, python-format
-msgid "Could not find mapping: %(mapping_id)s"
-msgstr ""
-
-#: keystone/exception.py:304
-#, python-format
-msgid "Could not find trust: %(trust_id)s"
-msgstr ""
-
-#: keystone/exception.py:308
-#, python-format
-msgid "No remaining uses for trust: %(trust_id)s"
-msgstr ""
-
-#: keystone/exception.py:312
-#, python-format
-msgid "Could not find credential: %(credential_id)s"
-msgstr ""
-
-#: keystone/exception.py:316
-#, python-format
-msgid "Could not find version: %(version)s"
-msgstr ""
-
-#: keystone/exception.py:320
-#, python-format
-msgid "Could not find Endpoint Group: %(endpoint_group_id)s"
-msgstr ""
-
-#: keystone/exception.py:324
-#, python-format
-msgid "Could not find Identity Provider: %(idp_id)s"
-msgstr ""
-
-#: keystone/exception.py:328
-#, python-format
-msgid "Could not find Service Provider: %(sp_id)s"
-msgstr ""
-
-#: keystone/exception.py:332
-#, python-format
-msgid ""
-"Could not find federated protocol %(protocol_id)s for Identity Provider: "
-"%(idp_id)s"
-msgstr ""
-
-#: keystone/exception.py:343
-#, python-format
-msgid ""
-"Could not find %(group_or_option)s in domain configuration for domain "
-"%(domain_id)s"
-msgstr ""
-
-#: keystone/exception.py:348
-#, python-format
-msgid "Conflict occurred attempting to store %(type)s - %(details)s"
-msgstr ""
-
-#: keystone/exception.py:356
-msgid "An unexpected error prevented the server from fulfilling your request."
-msgstr ""
-
-#: keystone/exception.py:359
-#, python-format
-msgid ""
-"An unexpected error prevented the server from fulfilling your request: "
-"%(exception)s"
-msgstr ""
-
-#: keystone/exception.py:382
-#, python-format
-msgid "Unable to consume trust %(trust_id)s, unable to acquire lock."
-msgstr ""
-
-#: keystone/exception.py:387
-msgid ""
-"Expected signing certificates are not available on the server. Please "
-"check Keystone configuration."
-msgstr ""
-
-#: keystone/exception.py:393
-#, python-format
-msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details."
-msgstr "Malformed endpoint URL (%(endpoint)s), see ERROR log for details."
-
-#: keystone/exception.py:398
-#, python-format
-msgid ""
-"Group %(group_id)s returned by mapping %(mapping_id)s was not found in "
-"the backend."
-msgstr ""
-
-#: keystone/exception.py:403
-#, python-format
-msgid "Error while reading metadata file, %(reason)s"
-msgstr ""
-
-#: keystone/exception.py:407
-#, python-format
-msgid ""
-"Unexpected combination of grant attributes - User: %(user_id)s, Group: "
-"%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s"
-msgstr ""
-
-#: keystone/exception.py:414
-msgid "The action you have requested has not been implemented."
-msgstr "The action you have requested has not been implemented."
-
-#: keystone/exception.py:421
-msgid "The service you have requested is no longer available on this server."
-msgstr ""
-
-#: keystone/exception.py:428
-#, python-format
-msgid "The Keystone configuration file %(config_file)s could not be found."
-msgstr "The Keystone configuration file %(config_file)s could not be found."
-
-#: keystone/exception.py:433
-msgid ""
-"No encryption keys found; run keystone-manage fernet_setup to bootstrap "
-"one."
-msgstr ""
-
-#: keystone/exception.py:438
-#, python-format
-msgid ""
-"The Keystone domain-specific configuration has specified more than one "
-"SQL driver (only one is permitted): %(source)s."
-msgstr ""
+msgid "Attempted to authenticate with an unsupported method."
+msgstr "Attempted to authenticate with an unsupported method."
-#: keystone/exception.py:445
-#, python-format
-msgid ""
-"%(mod_name)s doesn't provide database migrations. The migration "
-"repository path at %(path)s doesn't exist or isn't a directory."
-msgstr ""
+msgid "Authentication plugin error."
+msgstr "Authentication plugin error."
-#: keystone/exception.py:457
#, python-format
-msgid ""
-"Unable to sign SAML assertion. It is likely that this server does not "
-"have xmlsec1 installed, or this is the result of misconfiguration. Reason"
-" %(reason)s"
-msgstr ""
-
-#: keystone/exception.py:465
-msgid ""
-"No Authorization headers found, cannot proceed with OAuth related calls, "
-"if running under HTTPd or Apache, ensure WSGIPassAuthorization is set to "
-"On."
-msgstr ""
+msgid "Cannot change %(option_name)s %(attr)s"
+msgstr "Cannot change %(option_name)s %(attr)s"
-#: keystone/notifications.py:250
-#, python-format
-msgid "%(event)s is not a valid notification event, must be one of: %(actions)s"
-msgstr ""
+msgid "Cannot change consumer secret"
+msgstr "Cannot change consumer secret"
-#: keystone/notifications.py:259
#, python-format
-msgid "Method not callable: %s"
-msgstr ""
-
-#: keystone/assignment/controllers.py:107 keystone/identity/controllers.py:69
-#: keystone/resource/controllers.py:78
-msgid "Name field is required and cannot be empty"
-msgstr "Name field is required and cannot be empty"
-
-#: keystone/assignment/controllers.py:330
-#: keystone/assignment/controllers.py:753
-msgid "Specify a domain or project, not both"
-msgstr "Specify a domain or project, not both"
-
-#: keystone/assignment/controllers.py:333
-msgid "Specify one of domain or project"
-msgstr ""
-
-#: keystone/assignment/controllers.py:338
-#: keystone/assignment/controllers.py:758
-msgid "Specify a user or group, not both"
-msgstr "Specify a user or group, not both"
-
-#: keystone/assignment/controllers.py:341
-msgid "Specify one of user or group"
-msgstr ""
-
-#: keystone/assignment/controllers.py:742
-msgid "Combining effective and group filter will always result in an empty list."
-msgstr ""
+msgid "Cannot remove role that has not been granted, %s"
+msgstr "Cannot remove role that has not been granted, %s"
-#: keystone/assignment/controllers.py:747
-msgid ""
-"Combining effective, domain and inherited filters will always result in "
-"an empty list."
-msgstr ""
+msgid "Consumer not found"
+msgstr "Consumer not found"
-#: keystone/assignment/core.py:228
-msgid "Must specify either domain or project"
-msgstr ""
+msgid "Could not find role"
+msgstr "Could not find role"
-#: keystone/assignment/core.py:493
-#, python-format
-msgid "Project (%s)"
-msgstr "Project (%s)"
+msgid "Credential belongs to another user"
+msgstr "Credential belongs to another user"
-#: keystone/assignment/core.py:495
#, python-format
msgid "Domain (%s)"
msgstr "Domain (%s)"
-#: keystone/assignment/core.py:497
-msgid "Unknown Target"
-msgstr "Unknown Target"
-
-#: keystone/assignment/backends/ldap.py:92
-msgid "Domain metadata not supported by LDAP"
-msgstr ""
-
-#: keystone/assignment/backends/ldap.py:381
-#, python-format
-msgid "User %(user_id)s already has role %(role_id)s in tenant %(tenant_id)s"
-msgstr ""
-
-#: keystone/assignment/backends/ldap.py:387
-#, python-format
-msgid "Role %s not found"
-msgstr "Role %s not found"
-
-#: keystone/assignment/backends/ldap.py:402
-#: keystone/assignment/backends/sql.py:335
#, python-format
-msgid "Cannot remove role that has not been granted, %s"
-msgstr "Cannot remove role that has not been granted, %s"
+msgid "Domain is disabled: %s"
+msgstr "Domain is disabled: %s"
-#: keystone/assignment/backends/sql.py:356
-#, python-format
-msgid "Unexpected assignment type encountered, %s"
-msgstr ""
+msgid "Domain scoped token is not supported"
+msgstr "Domain scoped token is not supported"
-#: keystone/assignment/role_backends/ldap.py:61 keystone/catalog/core.py:103
-#: keystone/common/ldap/core.py:1400 keystone/resource/backends/ldap.py:149
#, python-format
msgid "Duplicate ID, %s."
msgstr "Duplicate ID, %s."
-#: keystone/assignment/role_backends/ldap.py:69
-#: keystone/common/ldap/core.py:1390
#, python-format
msgid "Duplicate name, %s."
msgstr "Duplicate name, %s."
-#: keystone/assignment/role_backends/ldap.py:119
-#, python-format
-msgid "Cannot duplicate name %s"
-msgstr ""
-
-#: keystone/auth/controllers.py:60
-#, python-format
-msgid ""
-"Cannot load an auth-plugin by class-name without a \"method\" attribute "
-"defined: %s"
-msgstr ""
-
-#: keystone/auth/controllers.py:71
-#, python-format
-msgid ""
-"Auth plugin %(plugin)s is requesting previously registered method "
-"%(method)s"
-msgstr ""
-
-#: keystone/auth/controllers.py:115
-#, python-format
-msgid ""
-"Unable to reconcile identity attribute %(attribute)s as it has "
-"conflicting values %(new)s and %(old)s"
-msgstr ""
-
-#: keystone/auth/controllers.py:336
-msgid "Scoping to both domain and project is not allowed"
-msgstr "Scoping to both domain and project is not allowed"
-
-#: keystone/auth/controllers.py:339
-msgid "Scoping to both domain and trust is not allowed"
-msgstr "Scoping to both domain and trust is not allowed"
-
-#: keystone/auth/controllers.py:342
-msgid "Scoping to both project and trust is not allowed"
-msgstr "Scoping to both project and trust is not allowed"
-
-#: keystone/auth/controllers.py:512
-msgid "User not found"
-msgstr "User not found"
-
-#: keystone/auth/controllers.py:616
-msgid "A project-scoped token is required to produce a service catalog."
-msgstr ""
-
-#: keystone/auth/plugins/external.py:46
-msgid "No authenticated user"
-msgstr "No authenticated user"
-
-#: keystone/auth/plugins/external.py:56
-#, python-format
-msgid "Unable to lookup user %s"
-msgstr "Unable to lookup user %s"
-
-#: keystone/auth/plugins/external.py:107
-msgid "auth_type is not Negotiate"
-msgstr ""
-
-#: keystone/auth/plugins/mapped.py:244
-msgid "Could not map user"
-msgstr ""
-
-#: keystone/auth/plugins/oauth1.py:39
-#, python-format
-msgid "%s not supported"
-msgstr ""
-
-#: keystone/auth/plugins/oauth1.py:57
-msgid "Access token is expired"
-msgstr "Access token is expired"
-
-#: keystone/auth/plugins/oauth1.py:71
-msgid "Could not validate the access token"
-msgstr ""
-
-#: keystone/auth/plugins/password.py:46
-msgid "Invalid username or password"
-msgstr "Invalid username or password"
-
-#: keystone/auth/plugins/token.py:72 keystone/token/controllers.py:160
-msgid "rescope a scoped token"
-msgstr ""
-
-#: keystone/catalog/controllers.py:168
-#, python-format
-msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\""
-msgstr ""
-
-#: keystone/common/authorization.py:47 keystone/common/wsgi.py:64
-#, python-format
-msgid "token reference must be a KeystoneToken type, got: %s"
-msgstr ""
-
-#: keystone/common/base64utils.py:66
-msgid "pad must be single character"
-msgstr "pad must be single character"
-
-#: keystone/common/base64utils.py:215
-#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
-msgstr "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
+msgid "Enabled field must be a boolean"
+msgstr "Enabled field must be a boolean"
-#: keystone/common/base64utils.py:219
-#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
-msgstr "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
+msgid "Enabled field should be a boolean"
+msgstr "Enabled field should be a boolean"
-#: keystone/common/base64utils.py:225
#, python-format
-msgid "text is not a multiple of 4, but contains pad \"%s\""
-msgstr "text is not a multiple of 4, but contains pad \"%s\""
-
-#: keystone/common/base64utils.py:244 keystone/common/base64utils.py:265
-msgid "padded base64url text must be multiple of 4 characters"
-msgstr "padded base64url text must be multiple of 4 characters"
-
-#: keystone/common/controller.py:237 keystone/token/providers/common.py:589
-msgid "Non-default domain is not supported"
-msgstr "Non-default domain is not supported"
+msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s"
+msgstr "Endpoint %(endpoint_id)s not found in project %(project_id)s"
-#: keystone/common/controller.py:305 keystone/identity/core.py:428
-#: keystone/resource/core.py:761 keystone/resource/backends/ldap.py:61
#, python-format
msgid "Expected dict or list: %s"
msgstr "Expected dict or list: %s"
-#: keystone/common/controller.py:318
-msgid "Marker could not be found"
-msgstr "Marker could not be found"
-
-#: keystone/common/controller.py:329
-msgid "Invalid limit value"
-msgstr "Invalid limit value"
-
-#: keystone/common/controller.py:637
-msgid "Cannot change Domain ID"
-msgstr ""
-
-#: keystone/common/controller.py:666
-msgid "domain_id is required as part of entity"
-msgstr ""
-
-#: keystone/common/controller.py:701
-msgid "A domain-scoped token must be used"
-msgstr ""
-
-#: keystone/common/dependency.py:68
-#, python-format
-msgid "Unregistered dependency: %(name)s for %(targets)s"
-msgstr ""
-
-#: keystone/common/dependency.py:108
-msgid "event_callbacks must be a dict"
-msgstr ""
-
-#: keystone/common/dependency.py:113
-#, python-format
-msgid "event_callbacks[%s] must be a dict"
-msgstr ""
-
-#: keystone/common/pemutils.py:223
-#, python-format
-msgid "unknown pem_type \"%(pem_type)s\", valid types are: %(valid_pem_types)s"
-msgstr "unknown pem_type \"%(pem_type)s\", valid types are: %(valid_pem_types)s"
-
-#: keystone/common/pemutils.py:242
-#, python-format
-msgid ""
-"unknown pem header \"%(pem_header)s\", valid headers are: "
-"%(valid_pem_headers)s"
-msgstr ""
-"unknown pem header \"%(pem_header)s\", valid headers are: "
-"%(valid_pem_headers)s"
-
-#: keystone/common/pemutils.py:298
-#, python-format
-msgid "failed to find end matching \"%s\""
-msgstr "failed to find end matching \"%s\""
-
-#: keystone/common/pemutils.py:302
-#, python-format
-msgid ""
-"beginning & end PEM headers do not match (%(begin_pem_header)s!= "
-"%(end_pem_header)s)"
-msgstr ""
-"beginning & end PEM headers do not match (%(begin_pem_header)s!= "
-"%(end_pem_header)s)"
-
-#: keystone/common/pemutils.py:377
-#, python-format
-msgid "unknown pem_type: \"%s\""
-msgstr "unknown pem_type: \"%s\""
-
-#: keystone/common/pemutils.py:389
-#, python-format
-msgid ""
-"failed to base64 decode %(pem_type)s PEM at position%(position)d: "
-"%(err_msg)s"
-msgstr ""
-"failed to base64 decode %(pem_type)s PEM at position%(position)d: "
-"%(err_msg)s"
-
-#: keystone/common/utils.py:164 keystone/credential/controllers.py:44
-msgid "Invalid blob in credential"
-msgstr "Invalid blob in credential"
-
-#: keystone/common/wsgi.py:330
-#, python-format
-msgid "%s field is required and cannot be empty"
-msgstr ""
-
-#: keystone/common/wsgi.py:342
-#, python-format
-msgid "%s field(s) cannot be empty"
-msgstr ""
-
-#: keystone/common/wsgi.py:563
-msgid "The resource could not be found."
-msgstr "The resource could not be found."
-
-#: keystone/common/wsgi.py:704
-#, python-format
-msgid "Unexpected status requested for JSON Home response, %s"
-msgstr ""
-
-#: keystone/common/cache/_memcache_pool.py:113
-#, python-format
-msgid "Unable to get a connection from pool id %(id)s after %(seconds)s seconds."
-msgstr ""
-
-#: keystone/common/cache/core.py:132
-msgid "region not type dogpile.cache.CacheRegion"
-msgstr "region not type dogpile.cache.CacheRegion"
-
-#: keystone/common/cache/backends/mongo.py:231
-msgid "db_hosts value is required"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:236
-msgid "database db_name is required"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:241
-msgid "cache_collection name is required"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:252
-msgid "integer value expected for w (write concern attribute)"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:260
-msgid "replicaset_name required when use_replica is True"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:275
-msgid "integer value expected for mongo_ttl_seconds"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:301
-msgid "no ssl support available"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:310
-#, python-format
-msgid ""
-"Invalid ssl_cert_reqs value of %s, must be one of \"NONE\", \"OPTIONAL\","
-" \"REQUIRED\""
-msgstr ""
-
-#: keystone/common/kvs/core.py:71
-#, python-format
-msgid "Lock Timeout occurred for key, %(target)s"
-msgstr ""
-
-#: keystone/common/kvs/core.py:106
-#, python-format
-msgid "KVS region %s is already configured. Cannot reconfigure."
-msgstr ""
-
-#: keystone/common/kvs/core.py:145
-#, python-format
-msgid "Key Value Store not configured: %s"
-msgstr ""
-
-#: keystone/common/kvs/core.py:198
-msgid "`key_mangler` option must be a function reference"
-msgstr ""
-
-#: keystone/common/kvs/core.py:353
-#, python-format
-msgid "Lock key must match target key: %(lock)s != %(target)s"
-msgstr ""
-
-#: keystone/common/kvs/core.py:357
-msgid "Must be called within an active lock context."
-msgstr ""
-
-#: keystone/common/kvs/backends/memcached.py:69
-#, python-format
-msgid "Maximum lock attempts on %s occurred."
-msgstr ""
-
-#: keystone/common/kvs/backends/memcached.py:108
-#, python-format
-msgid ""
-"Backend `%(driver)s` is not a valid memcached backend. Valid drivers: "
-"%(driver_list)s"
-msgstr ""
-
-#: keystone/common/kvs/backends/memcached.py:178
-msgid "`key_mangler` functions must be callable."
-msgstr ""
-
-#: keystone/common/ldap/core.py:191
-#, python-format
-msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s"
-msgstr ""
+msgid "Failed to validate token"
+msgstr "Failed to validate token"
-#: keystone/common/ldap/core.py:201
#, python-format
msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
msgstr "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
-#: keystone/common/ldap/core.py:213
+#, python-format
+msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available"
+msgstr "Invalid LDAP TLS_AVAIL option: %s. TLS not available"
+
#, python-format
msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s"
msgstr "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s"
-#: keystone/common/ldap/core.py:588
msgid "Invalid TLS / LDAPS combination"
msgstr "Invalid TLS / LDAPS combination"
-#: keystone/common/ldap/core.py:593
-#, python-format
-msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available"
-msgstr "Invalid LDAP TLS_AVAIL option: %s. TLS not available"
-
-#: keystone/common/ldap/core.py:603
-#, python-format
-msgid "tls_cacertfile %s not found or is not a file"
-msgstr "tls_cacertfile %s not found or is not a file"
+msgid "Invalid blob in credential"
+msgstr "Invalid blob in credential"
-#: keystone/common/ldap/core.py:615
-#, python-format
-msgid "tls_cacertdir %s not found or is not a directory"
-msgstr "tls_cacertdir %s not found or is not a directory"
+msgid "Invalid limit value"
+msgstr "Invalid limit value"
-#: keystone/common/ldap/core.py:1325
-#, python-format
-msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s"
-msgstr ""
+msgid "Invalid username or password"
+msgstr "Invalid username or password"
-#: keystone/common/ldap/core.py:1369
#, python-format
msgid "LDAP %s create"
msgstr "LDAP %s create"
-#: keystone/common/ldap/core.py:1374
-#, python-format
-msgid "LDAP %s update"
-msgstr "LDAP %s update"
-
-#: keystone/common/ldap/core.py:1379
#, python-format
msgid "LDAP %s delete"
msgstr "LDAP %s delete"
-#: keystone/common/ldap/core.py:1521
-msgid ""
-"Disabling an entity where the 'enable' attribute is ignored by "
-"configuration."
-msgstr ""
-
-#: keystone/common/ldap/core.py:1532
-#, python-format
-msgid "Cannot change %(option_name)s %(attr)s"
-msgstr "Cannot change %(option_name)s %(attr)s"
-
-#: keystone/common/ldap/core.py:1619
#, python-format
-msgid "Member %(member)s is already a member of group %(group)s"
-msgstr ""
-
-#: keystone/common/sql/core.py:219
-msgid ""
-"Cannot truncate a driver call without hints list as first parameter after"
-" self "
-msgstr ""
-
-#: keystone/common/sql/core.py:410
-msgid "Duplicate Entry"
-msgstr ""
-
-#: keystone/common/sql/core.py:426
-#, python-format
-msgid "An unexpected error occurred when trying to store %s"
-msgstr ""
-
-#: keystone/common/sql/migration_helpers.py:187
-#: keystone/common/sql/migration_helpers.py:245
-#, python-format
-msgid "%s extension does not exist."
-msgstr ""
+msgid "LDAP %s update"
+msgstr "LDAP %s update"
-#: keystone/common/validation/validators.py:54
#, python-format
-msgid "Invalid input for field '%(path)s'. The value is '%(value)s'."
-msgstr ""
-
-#: keystone/contrib/ec2/controllers.py:318
-msgid "Token belongs to another user"
-msgstr "Token belongs to another user"
-
-#: keystone/contrib/ec2/controllers.py:346
-msgid "Credential belongs to another user"
-msgstr "Credential belongs to another user"
+msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details."
+msgstr "Malformed endpoint URL (%(endpoint)s), see ERROR log for details."
-#: keystone/contrib/endpoint_filter/backends/sql.py:69
-#, python-format
-msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s"
-msgstr "Endpoint %(endpoint_id)s not found in project %(project_id)s"
+msgid "Marker could not be found"
+msgstr "Marker could not be found"
-#: keystone/contrib/endpoint_filter/backends/sql.py:180
-msgid "Endpoint Group Project Association not found"
-msgstr ""
+msgid "Name field is required and cannot be empty"
+msgstr "Name field is required and cannot be empty"
-#: keystone/contrib/endpoint_policy/core.py:258
-#, python-format
-msgid "No policy is associated with endpoint %(endpoint_id)s."
-msgstr ""
+msgid "No authenticated user"
+msgstr "No authenticated user"
-#: keystone/contrib/federation/controllers.py:274
-msgid "Missing entity ID from environment"
-msgstr ""
+msgid "No options specified"
+msgstr "No options specified"
-#: keystone/contrib/federation/controllers.py:282
-msgid "Request must have an origin query parameter"
-msgstr ""
+msgid "Non-default domain is not supported"
+msgstr "Non-default domain is not supported"
-#: keystone/contrib/federation/controllers.py:292
#, python-format
-msgid "%(host)s is not a trusted dashboard host"
-msgstr ""
-
-#: keystone/contrib/federation/controllers.py:333
-msgid "Use a project scoped token when attempting to create a SAML assertion"
-msgstr ""
+msgid "Project (%s)"
+msgstr "Project (%s)"
-#: keystone/contrib/federation/idp.py:454
#, python-format
-msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s"
-msgstr ""
-
-#: keystone/contrib/federation/idp.py:521
-msgid "Ensure configuration option idp_entity_id is set."
-msgstr ""
-
-#: keystone/contrib/federation/idp.py:524
-msgid "Ensure configuration option idp_sso_endpoint is set."
-msgstr ""
-
-#: keystone/contrib/federation/idp.py:544
-msgid ""
-"idp_contact_type must be one of: [technical, other, support, "
-"administrative or billing."
-msgstr ""
-
-#: keystone/contrib/federation/utils.py:178
-msgid "Federation token is expired"
-msgstr ""
-
-#: keystone/contrib/federation/utils.py:208
-msgid ""
-"Could not find Identity Provider identifier in environment, check "
-"[federation] remote_id_attribute for details."
-msgstr ""
-
-#: keystone/contrib/federation/utils.py:213
-msgid ""
-"Incoming identity provider identifier not included among the accepted "
-"identifiers."
-msgstr ""
+msgid "Project is disabled: %s"
+msgstr "Project is disabled: %s"
-#: keystone/contrib/federation/utils.py:501
-#, python-format
-msgid "User type %s not supported"
-msgstr ""
+msgid "Request Token does not have an authorizing user id"
+msgstr "Request Token does not have an authorizing user id"
-#: keystone/contrib/federation/utils.py:537
#, python-format
msgid ""
-"Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords "
-"must be specified."
-msgstr ""
-
-#: keystone/contrib/federation/utils.py:753
-#, python-format
-msgid "Identity Provider %(idp)s is disabled"
-msgstr ""
-
-#: keystone/contrib/federation/utils.py:761
-#, python-format
-msgid "Service Provider %(sp)s is disabled"
-msgstr ""
-
-#: keystone/contrib/oauth1/controllers.py:99
-msgid "Cannot change consumer secret"
-msgstr "Cannot change consumer secret"
-
-#: keystone/contrib/oauth1/controllers.py:131
-msgid "Cannot list request tokens with a token issued via delegation."
-msgstr ""
-
-#: keystone/contrib/oauth1/controllers.py:192
-#: keystone/contrib/oauth1/backends/sql.py:270
-msgid "User IDs do not match"
-msgstr "User IDs do not match"
-
-#: keystone/contrib/oauth1/controllers.py:199
-msgid "Could not find role"
-msgstr "Could not find role"
-
-#: keystone/contrib/oauth1/controllers.py:248
-msgid "Invalid signature"
+"Request attribute %(attribute)s must be less than or equal to %(size)i. The "
+"server could not comply with the request because the attribute size is "
+"invalid (too large). The client is assumed to be in error."
msgstr ""
+"Request attribute %(attribute)s must be less than or equal to %(size)i. The "
+"server could not comply with the request because the attribute size is "
+"invalid (too large). The client is assumed to be in error."
-#: keystone/contrib/oauth1/controllers.py:299
-#: keystone/contrib/oauth1/controllers.py:377
msgid "Request token is expired"
msgstr "Request token is expired"
-#: keystone/contrib/oauth1/controllers.py:313
-msgid "There should not be any non-oauth parameters"
-msgstr "There should not be any non-oauth parameters"
-
-#: keystone/contrib/oauth1/controllers.py:317
-msgid "provided consumer key does not match stored consumer key"
-msgstr "provided consumer key does not match stored consumer key"
-
-#: keystone/contrib/oauth1/controllers.py:321
-msgid "provided verifier does not match stored verifier"
-msgstr "provided verifier does not match stored verifier"
-
-#: keystone/contrib/oauth1/controllers.py:325
-msgid "provided request key does not match stored request key"
-msgstr "provided request key does not match stored request key"
-
-#: keystone/contrib/oauth1/controllers.py:329
-msgid "Request Token does not have an authorizing user id"
-msgstr "Request Token does not have an authorizing user id"
-
-#: keystone/contrib/oauth1/controllers.py:366
-msgid "Cannot authorize a request token with a token issued via delegation."
-msgstr ""
-
-#: keystone/contrib/oauth1/controllers.py:396
-msgid "authorizing user does not have role required"
-msgstr "authorizing user does not have role required"
-
-#: keystone/contrib/oauth1/controllers.py:409
-msgid "User is not a member of the requested project"
-msgstr "User is not a member of the requested project"
-
-#: keystone/contrib/oauth1/backends/sql.py:91
-msgid "Consumer not found"
-msgstr "Consumer not found"
-
-#: keystone/contrib/oauth1/backends/sql.py:186
msgid "Request token not found"
msgstr "Request token not found"
-#: keystone/contrib/oauth1/backends/sql.py:250
-msgid "Access token not found"
-msgstr "Access token not found"
-
-#: keystone/contrib/revoke/controllers.py:33
-#, python-format
-msgid "invalid date format %s"
-msgstr ""
-
-#: keystone/contrib/revoke/core.py:150
-msgid ""
-"The revoke call must not have both domain_id and project_id. This is a "
-"bug in the Keystone server. The current request is aborted."
-msgstr ""
-
-#: keystone/contrib/revoke/core.py:218 keystone/token/provider.py:207
-#: keystone/token/provider.py:230 keystone/token/provider.py:296
-#: keystone/token/provider.py:303
-msgid "Failed to validate token"
-msgstr "Failed to validate token"
-
-#: keystone/identity/controllers.py:72
-msgid "Enabled field must be a boolean"
-msgstr "Enabled field must be a boolean"
-
-#: keystone/identity/controllers.py:98
-msgid "Enabled field should be a boolean"
-msgstr "Enabled field should be a boolean"
-
-#: keystone/identity/core.py:112
-#, python-format
-msgid "Database at /domains/%s/config"
-msgstr ""
-
-#: keystone/identity/core.py:287 keystone/identity/backends/ldap.py:59
-#: keystone/identity/backends/ldap.py:61 keystone/identity/backends/ldap.py:67
-#: keystone/identity/backends/ldap.py:69 keystone/identity/backends/sql.py:104
-#: keystone/identity/backends/sql.py:106
-msgid "Invalid user / password"
-msgstr ""
-
-#: keystone/identity/core.py:693
-#, python-format
-msgid "User is disabled: %s"
-msgstr "User is disabled: %s"
-
-#: keystone/identity/core.py:735
-msgid "Cannot change user ID"
-msgstr ""
-
-#: keystone/identity/backends/ldap.py:99
-msgid "Cannot change user name"
-msgstr ""
-
-#: keystone/identity/backends/ldap.py:188 keystone/identity/backends/sql.py:188
-#: keystone/identity/backends/sql.py:206
#, python-format
-msgid "User '%(user_id)s' not found in group '%(group_id)s'"
-msgstr ""
-
-#: keystone/identity/backends/ldap.py:339
-#, python-format
-msgid "User %(user_id)s is already a member of group %(group_id)s"
-msgstr "User %(user_id)s is already a member of group %(group_id)s"
-
-#: keystone/models/token_model.py:61
-msgid "Found invalid token: scoped to both project and domain."
-msgstr ""
+msgid "Role %s not found"
+msgstr "Role %s not found"
-#: keystone/openstack/common/versionutils.py:108
-#, python-format
-msgid ""
-"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and "
-"may be removed in %(remove_in)s."
-msgstr ""
-"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and "
-"may be removed in %(remove_in)s."
+msgid "Scoping to both domain and project is not allowed"
+msgstr "Scoping to both domain and project is not allowed"
-#: keystone/openstack/common/versionutils.py:112
-#, python-format
-msgid ""
-"%(what)s is deprecated as of %(as_of)s and may be removed in "
-"%(remove_in)s. It will not be superseded."
-msgstr ""
-"%(what)s is deprecated as of %(as_of)s and may be removed in "
-"%(remove_in)s. It will not be superseded."
+msgid "Scoping to both domain and trust is not allowed"
+msgstr "Scoping to both domain and trust is not allowed"
-#: keystone/openstack/common/versionutils.py:116
-#, python-format
-msgid "%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s."
-msgstr ""
+msgid "Scoping to both project and trust is not allowed"
+msgstr "Scoping to both project and trust is not allowed"
-#: keystone/openstack/common/versionutils.py:119
-#, python-format
-msgid "%(what)s is deprecated as of %(as_of)s. It will not be superseded."
-msgstr ""
+msgid "Specify a domain or project, not both"
+msgstr "Specify a domain or project, not both"
-#: keystone/openstack/common/versionutils.py:241
-#, python-format
-msgid "Deprecated: %s"
-msgstr "Deprecated: %s"
+msgid "Specify a user or group, not both"
+msgstr "Specify a user or group, not both"
-#: keystone/openstack/common/versionutils.py:259
#, python-format
-msgid "Fatal call to deprecated config: %(msg)s"
-msgstr "Fatal call to deprecated config: %(msg)s"
-
-#: keystone/resource/controllers.py:231
-msgid ""
-"Cannot use parents_as_list and parents_as_ids query params at the same "
-"time."
-msgstr ""
-
-#: keystone/resource/controllers.py:237
msgid ""
-"Cannot use subtree_as_list and subtree_as_ids query params at the same "
-"time."
-msgstr ""
-
-#: keystone/resource/core.py:80
-#, python-format
-msgid "max hierarchy depth reached for %s branch."
-msgstr ""
-
-#: keystone/resource/core.py:97
-msgid "cannot create a project within a different domain than its parents."
-msgstr ""
-
-#: keystone/resource/core.py:101
-#, python-format
-msgid "cannot create a project in a branch containing a disabled project: %s"
-msgstr ""
-
-#: keystone/resource/core.py:123
-#, python-format
-msgid "Domain is disabled: %s"
-msgstr "Domain is disabled: %s"
-
-#: keystone/resource/core.py:141
-#, python-format
-msgid "Domain cannot be named %s"
+"String length exceeded.The length of string '%(string)s' exceeded the limit "
+"of column %(type)s(CHAR(%(length)d))."
msgstr ""
+"String length exceeded.The length of string '%(string)s' exceeded the limit "
+"of column %(type)s(CHAR(%(length)d))."
-#: keystone/resource/core.py:144
#, python-format
-msgid "Domain cannot have ID %s"
-msgstr ""
-
-#: keystone/resource/core.py:156
-#, python-format
-msgid "Project is disabled: %s"
-msgstr "Project is disabled: %s"
-
-#: keystone/resource/core.py:176
-#, python-format
-msgid "cannot enable project %s since it has disabled parents"
-msgstr ""
-
-#: keystone/resource/core.py:184
-#, python-format
-msgid "cannot disable project %s since its subtree contains enabled projects"
-msgstr ""
-
-#: keystone/resource/core.py:195
-msgid "Update of `parent_id` is not allowed."
-msgstr ""
-
-#: keystone/resource/core.py:222
-#, python-format
-msgid "cannot delete the project %s since it is not a leaf in the hierarchy."
-msgstr ""
-
-#: keystone/resource/core.py:376
-msgid "Multiple domains are not supported"
-msgstr ""
-
-#: keystone/resource/core.py:429
-msgid "delete the default domain"
-msgstr ""
-
-#: keystone/resource/core.py:440
-msgid "cannot delete a domain that is enabled, please disable it first."
-msgstr ""
+msgid "The Keystone configuration file %(config_file)s could not be found."
+msgstr "The Keystone configuration file %(config_file)s could not be found."
-#: keystone/resource/core.py:841
-msgid "No options specified"
-msgstr "No options specified"
+msgid "The action you have requested has not been implemented."
+msgstr "The action you have requested has not been implemented."
-#: keystone/resource/core.py:847
-#, python-format
-msgid ""
-"The value of group %(group)s specified in the config should be a "
-"dictionary of options"
-msgstr ""
+msgid "The request you have made requires authentication."
+msgstr "The request you have made requires authentication."
-#: keystone/resource/core.py:871
-#, python-format
-msgid ""
-"Option %(option)s found with no group specified while checking domain "
-"configuration request"
-msgstr ""
+msgid "The resource could not be found."
+msgstr "The resource could not be found."
-#: keystone/resource/core.py:878
-#, python-format
-msgid "Group %(group)s is not supported for domain specific configurations"
-msgstr ""
+msgid "There should not be any non-oauth parameters"
+msgstr "There should not be any non-oauth parameters"
-#: keystone/resource/core.py:885
-#, python-format
msgid ""
-"Option %(option)s in group %(group)s is not supported for domain specific"
-" configurations"
-msgstr ""
-
-#: keystone/resource/core.py:938
-msgid "An unexpected error occurred when retrieving domain configs"
-msgstr ""
-
-#: keystone/resource/core.py:1013 keystone/resource/core.py:1097
-#: keystone/resource/core.py:1167 keystone/resource/config_backends/sql.py:70
-#, python-format
-msgid "option %(option)s in group %(group)s"
+"Timestamp not in expected format. The server could not comply with the "
+"request since it is either malformed or otherwise incorrect. The client is "
+"assumed to be in error."
msgstr ""
+"Timestamp not in expected format. The server could not comply with the "
+"request since it is either malformed or otherwise incorrect. The client is "
+"assumed to be in error."
-#: keystone/resource/core.py:1016 keystone/resource/core.py:1102
-#: keystone/resource/core.py:1163
-#, python-format
-msgid "group %(group)s"
-msgstr ""
+msgid "Token belongs to another user"
+msgstr "Token belongs to another user"
-#: keystone/resource/core.py:1018
-msgid "any options"
-msgstr ""
+msgid "Token does not belong to specified tenant."
+msgstr "Token does not belong to specified tenant."
-#: keystone/resource/core.py:1062
-#, python-format
-msgid ""
-"Trying to update option %(option)s in group %(group)s, so that, and only "
-"that, option must be specified in the config"
-msgstr ""
+msgid "Trustee has no delegated roles."
+msgstr "Trustee has no delegated roles."
-#: keystone/resource/core.py:1067
-#, python-format
-msgid ""
-"Trying to update group %(group)s, so that, and only that, group must be "
-"specified in the config"
-msgstr ""
+msgid "Trustor is disabled."
+msgstr "Trustor is disabled."
-#: keystone/resource/core.py:1076
#, python-format
-msgid ""
-"request to update group %(group)s, but config provided contains group "
-"%(group_other)s instead"
-msgstr ""
+msgid "Unable to locate domain config directory: %s"
+msgstr "Unable to locate domain config directory: %s"
-#: keystone/resource/core.py:1083
#, python-format
-msgid ""
-"Trying to update option %(option)s in group %(group)s, but config "
-"provided contains option %(option_other)s instead"
-msgstr ""
-
-#: keystone/resource/backends/ldap.py:151
-#: keystone/resource/backends/ldap.py:159
-#: keystone/resource/backends/ldap.py:163
-msgid "Domains are read-only against LDAP"
-msgstr ""
+msgid "Unable to lookup user %s"
+msgstr "Unable to lookup user %s"
-#: keystone/server/eventlet.py:77
-msgid ""
-"Running keystone via eventlet is deprecated as of Kilo in favor of "
-"running in a WSGI server (e.g. mod_wsgi). Support for keystone under "
-"eventlet will be removed in the \"M\"-Release."
-msgstr ""
+msgid "Unable to sign token."
+msgstr "Unable to sign token."
-#: keystone/server/eventlet.py:90
-#, python-format
-msgid "Failed to start the %(name)s server"
-msgstr ""
+msgid "Unknown Target"
+msgstr "Unknown Target"
-#: keystone/token/controllers.py:391
#, python-format
msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
msgstr "User %(u_id)s is unauthorized for tenant %(t_id)s"
-#: keystone/token/controllers.py:410 keystone/token/controllers.py:413
-msgid "Token does not belong to specified tenant."
-msgstr "Token does not belong to specified tenant."
-
-#: keystone/token/persistence/backends/kvs.py:133
#, python-format
-msgid "Unknown token version %s"
-msgstr ""
+msgid "User %(user_id)s has no access to domain %(domain_id)s"
+msgstr "User %(user_id)s has no access to domain %(domain_id)s"
-#: keystone/token/providers/common.py:250
-#: keystone/token/providers/common.py:355
#, python-format
msgid "User %(user_id)s has no access to project %(project_id)s"
msgstr "User %(user_id)s has no access to project %(project_id)s"
-#: keystone/token/providers/common.py:255
-#: keystone/token/providers/common.py:360
#, python-format
-msgid "User %(user_id)s has no access to domain %(domain_id)s"
-msgstr "User %(user_id)s has no access to domain %(domain_id)s"
-
-#: keystone/token/providers/common.py:282
-msgid "Trustor is disabled."
-msgstr "Trustor is disabled."
+msgid "User %(user_id)s is already a member of group %(group_id)s"
+msgstr "User %(user_id)s is already a member of group %(group_id)s"
-#: keystone/token/providers/common.py:346
-msgid "Trustee has no delegated roles."
-msgstr "Trustee has no delegated roles."
+msgid "User IDs do not match"
+msgstr "User IDs do not match"
-#: keystone/token/providers/common.py:407
#, python-format
-msgid "Invalid audit info data type: %(data)s (%(type)s)"
-msgstr ""
+msgid "User is disabled: %s"
+msgstr "User is disabled: %s"
+
+msgid "User is not a member of the requested project"
+msgstr "User is not a member of the requested project"
-#: keystone/token/providers/common.py:435
msgid "User is not a trustee."
msgstr "User is not a trustee."
-#: keystone/token/providers/common.py:579
-msgid ""
-"Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 "
-"Authentication"
-msgstr ""
+msgid "User not found"
+msgstr "User not found"
-#: keystone/token/providers/common.py:597
-msgid "Domain scoped token is not supported"
-msgstr "Domain scoped token is not supported"
+msgid "You are not authorized to perform the requested action."
+msgstr "You are not authorized to perform the requested action."
-#: keystone/token/providers/pki.py:48 keystone/token/providers/pkiz.py:30
-msgid "Unable to sign token."
-msgstr "Unable to sign token."
+msgid "authorizing user does not have role required"
+msgstr "authorizing user does not have role required"
-#: keystone/token/providers/fernet/core.py:215
-msgid ""
-"This is not a v2.0 Fernet token. Use v3 for trust, domain, or federated "
-"tokens."
-msgstr ""
+msgid "pad must be single character"
+msgstr "pad must be single character"
-#: keystone/token/providers/fernet/token_formatters.py:189
-#, python-format
-msgid "This is not a recognized Fernet payload version: %s"
-msgstr ""
+msgid "padded base64url text must be multiple of 4 characters"
+msgstr "padded base64url text must be multiple of 4 characters"
-#: keystone/trust/controllers.py:148
-msgid "Redelegation allowed for delegated by trust only"
-msgstr ""
+msgid "provided consumer key does not match stored consumer key"
+msgstr "provided consumer key does not match stored consumer key"
-#: keystone/trust/controllers.py:181
-msgid "The authenticated user should match the trustor."
-msgstr ""
+msgid "provided request key does not match stored request key"
+msgstr "provided request key does not match stored request key"
-#: keystone/trust/controllers.py:186
-msgid "At least one role should be specified."
-msgstr ""
+msgid "provided verifier does not match stored verifier"
+msgstr "provided verifier does not match stored verifier"
-#: keystone/trust/core.py:57
-#, python-format
-msgid ""
-"Remaining redelegation depth of %(redelegation_depth)d out of allowed "
-"range of [0..%(max_count)d]"
-msgstr ""
+msgid "region not type dogpile.cache.CacheRegion"
+msgstr "region not type dogpile.cache.CacheRegion"
-#: keystone/trust/core.py:66
#, python-format
-msgid ""
-"Field \"remaining_uses\" is set to %(value)s while it must not be set in "
-"order to redelegate a trust"
-msgstr ""
-
-#: keystone/trust/core.py:77
-msgid "Requested expiration time is more than redelegated trust can provide"
-msgstr ""
-
-#: keystone/trust/core.py:87
-msgid "Some of requested roles are not in redelegated trust"
-msgstr ""
-
-#: keystone/trust/core.py:116
-msgid "One of the trust agents is disabled or deleted"
-msgstr ""
-
-#: keystone/trust/core.py:135
-msgid "remaining_uses must be a positive integer or null."
-msgstr ""
+msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
+msgstr "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
-#: keystone/trust/core.py:141
#, python-format
-msgid ""
-"Requested redelegation depth of %(requested_count)d is greater than "
-"allowed %(max_count)d"
-msgstr ""
+msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
+msgstr "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
-#: keystone/trust/core.py:147
-msgid "remaining_uses must not be set if redelegation is allowed"
-msgstr ""
+#, python-format
+msgid "text is not a multiple of 4, but contains pad \"%s\""
+msgstr "text is not a multiple of 4, but contains pad \"%s\""
-#: keystone/trust/core.py:157
-msgid ""
-"Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting"
-" this parameter is advised."
-msgstr ""
+#, python-format
+msgid "tls_cacertdir %s not found or is not a directory"
+msgstr "tls_cacertdir %s not found or is not a directory"
+#, python-format
+msgid "tls_cacertfile %s not found or is not a file"
+msgstr "tls_cacertfile %s not found or is not a file"
diff --git a/keystone-moon/keystone/locale/es/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/es/LC_MESSAGES/keystone-log-critical.po
index 6ebff226..336c5d33 100644
--- a/keystone-moon/keystone/locale/es/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/es/LC_MESSAGES/keystone-log-critical.po
@@ -1,5 +1,5 @@
# Translations template for keystone.
-# Copyright (C) 2014 OpenStack Foundation
+# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
# Translators:
@@ -7,19 +7,18 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
"PO-Revision-Date: 2014-08-31 15:19+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Spanish (http://www.transifex.com/projects/p/keystone/"
-"language/es/)\n"
+"Language-Team: Spanish (http://www.transifex.com/openstack/keystone/language/"
+"es/)\n"
"Language: es\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
-#: keystone/catalog/backends/templated.py:106
#, python-format
msgid "Unable to open template file %s"
msgstr "No se puede abrir el archivo de plantilla %s"
diff --git a/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-critical.po
index c40440be..8657e66a 100644
--- a/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-critical.po
@@ -1,5 +1,5 @@
# Translations template for keystone.
-# Copyright (C) 2014 OpenStack Foundation
+# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
# Translators:
@@ -7,19 +7,18 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
"PO-Revision-Date: 2014-08-31 15:19+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: French (http://www.transifex.com/projects/p/keystone/language/"
+"Language-Team: French (http://www.transifex.com/openstack/keystone/language/"
"fr/)\n"
"Language: fr\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
-#: keystone/catalog/backends/templated.py:106
#, python-format
msgid "Unable to open template file %s"
msgstr "Impossible d'ouvrir le fichier modèle %s"
diff --git a/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-error.po b/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-error.po
index d8dc409f..ba787ee3 100644
--- a/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-error.po
+++ b/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-error.po
@@ -9,70 +9,33 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-03-09 06:03+0000\n"
-"PO-Revision-Date: 2015-03-07 04:31+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
+"PO-Revision-Date: 2015-06-26 17:13+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: French (http://www.transifex.com/projects/p/keystone/language/"
+"Language-Team: French (http://www.transifex.com/openstack/keystone/language/"
"fr/)\n"
"Language: fr\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
-#: keystone/notifications.py:304
-msgid "Failed to construct notifier"
-msgstr "Échec de construction de la notification"
-
-#: keystone/notifications.py:389
-#, python-format
-msgid "Failed to send %(res_id)s %(event_type)s notification"
-msgstr "Échec de l'envoi de la notification %(res_id)s %(event_type)s"
-
-#: keystone/notifications.py:606
-#, python-format
-msgid "Failed to send %(action)s %(event_type)s notification"
-msgstr "Échec de l'envoi de la notification %(action)s %(event_type)s "
-
-#: keystone/catalog/core.py:62
-#, python-format
-msgid "Malformed endpoint - %(url)r is not a string"
-msgstr "Critère mal formé - %(url)r n'est pas une chaine de caractère"
-
-#: keystone/catalog/core.py:66
-#, python-format
-msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s"
-msgstr "Noeud final incorrect %(url)s - clé inconnue %(keyerror)s"
-
-#: keystone/catalog/core.py:71
#, python-format
msgid ""
-"Malformed endpoint '%(url)s'. The following type error occurred during "
-"string substitution: %(typeerror)s"
-msgstr ""
-"Noeud final incorrect '%(url)s'. L'erreur suivante est survenue pendant la "
-"substitution de chaine : %(typeerror)s"
-
-#: keystone/catalog/core.py:77
-#, python-format
-msgid ""
-"Malformed endpoint %s - incomplete format (are you missing a type notifier ?)"
+"Circular reference or a repeated entry found in region tree - %(region_id)s."
msgstr ""
-"Noeud final incorrect '%s - Format incomplet (un type de notification manque-"
-"t-il ?)"
+"Référence circulaire ou entrée dupliquée trouvée dans l'arbre de la région - "
+"%(region_id)s."
-#: keystone/common/openssl.py:93
#, python-format
msgid "Command %(to_exec)s exited with %(retcode)s- %(output)s"
msgstr "La commande %(to_exec)s a retourné %(retcode)s- %(output)s"
-#: keystone/common/openssl.py:121
#, python-format
-msgid "Failed to remove file %(file_path)r: %(error)s"
-msgstr "Échec de la suppression du fichier %(file_path)r: %(error)s"
+msgid "Could not bind to %(host)s:%(port)s"
+msgstr "Impossible de s'attacher à %(host)s:%(port)s"
-#: keystone/common/utils.py:239
msgid ""
"Error setting up the debug environment. Verify that the option --debug-url "
"has the format <host>:<port> and that a debugger processes is listening on "
@@ -82,103 +45,53 @@ msgstr ""
"l'option --debug-url a le format <host>:<port> et que le processus de "
"débogage écoute sur ce port."
-#: keystone/common/cache/core.py:100
#, python-format
-msgid ""
-"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
-"Skipping unknown format: %s"
-msgstr ""
-
-#: keystone/common/environment/eventlet_server.py:99
-#, python-format
-msgid "Could not bind to %(host)s:%(port)s"
-msgstr "Impossible de s'attacher à %(host)s:%(port)s"
+msgid "Error when signing assertion, reason: %(reason)s"
+msgstr "Erreur lors de la signature d'une assertion : %(reason)s"
-#: keystone/common/environment/eventlet_server.py:185
-msgid "Server error"
-msgstr "Erreur serveur"
+msgid "Failed to construct notifier"
+msgstr "Échec de construction de la notification"
-#: keystone/contrib/endpoint_policy/core.py:129
-#: keystone/contrib/endpoint_policy/core.py:228
#, python-format
-msgid ""
-"Circular reference or a repeated entry found in region tree - %(region_id)s."
-msgstr ""
-"Référence circulaire ou entrée dupliquée trouvée dans l'arbre de la région - "
-"%(region_id)s."
+msgid "Failed to remove file %(file_path)r: %(error)s"
+msgstr "Échec de la suppression du fichier %(file_path)r: %(error)s"
-#: keystone/contrib/federation/idp.py:410
#, python-format
-msgid "Error when signing assertion, reason: %(reason)s"
-msgstr "Erreur lors de la signature d'une assertion : %(reason)s"
-
-#: keystone/contrib/oauth1/core.py:136
-msgid "Cannot retrieve Authorization headers"
-msgstr ""
-
-#: keystone/openstack/common/loopingcall.py:95
-msgid "in fixed duration looping call"
-msgstr "dans l'appel en boucle de durée fixe"
+msgid "Failed to send %(action)s %(event_type)s notification"
+msgstr "Échec de l'envoi de la notification %(action)s %(event_type)s "
-#: keystone/openstack/common/loopingcall.py:138
-msgid "in dynamic looping call"
-msgstr "dans l'appel en boucle dynamique"
+#, python-format
+msgid "Failed to send %(res_id)s %(event_type)s notification"
+msgstr "Échec de l'envoi de la notification %(res_id)s %(event_type)s"
-#: keystone/openstack/common/service.py:268
-msgid "Unhandled exception"
-msgstr "Exception non gérée"
+msgid "Failed to validate token"
+msgstr "Échec de validation du token"
-#: keystone/resource/core.py:477
#, python-format
-msgid ""
-"Circular reference or a repeated entry found projects hierarchy - "
-"%(project_id)s."
-msgstr ""
+msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s"
+msgstr "Noeud final incorrect %(url)s - clé inconnue %(keyerror)s"
-#: keystone/resource/core.py:939
#, python-format
msgid ""
-"Unexpected results in response for domain config - %(count)s responses, "
-"first option is %(option)s, expected option %(expected)s"
+"Malformed endpoint %s - incomplete format (are you missing a type notifier ?)"
msgstr ""
+"Noeud final incorrect '%s - Format incomplet (un type de notification manque-"
+"t-il ?)"
-#: keystone/resource/backends/sql.py:102 keystone/resource/backends/sql.py:121
#, python-format
msgid ""
-"Circular reference or a repeated entry found in projects hierarchy - "
-"%(project_id)s."
-msgstr ""
-
-#: keystone/token/provider.py:292
-#, python-format
-msgid "Unexpected error or malformed token determining token expiry: %s"
+"Malformed endpoint '%(url)s'. The following type error occurred during "
+"string substitution: %(typeerror)s"
msgstr ""
+"Noeud final incorrect '%(url)s'. L'erreur suivante est survenue pendant la "
+"substitution de chaine : %(typeerror)s"
-#: keystone/token/persistence/backends/kvs.py:226
#, python-format
-msgid ""
-"Reinitializing revocation list due to error in loading revocation list from "
-"backend. Expected `list` type got `%(type)s`. Old revocation list data: "
-"%(list)r"
-msgstr ""
+msgid "Malformed endpoint - %(url)r is not a string"
+msgstr "Critère mal formé - %(url)r n'est pas une chaîne de caractères"
-#: keystone/token/providers/common.py:611
-msgid "Failed to validate token"
-msgstr "Echec de validation du token"
+msgid "Server error"
+msgstr "Erreur serveur"
-#: keystone/token/providers/pki.py:47
msgid "Unable to sign token"
msgstr "Impossible de signer le jeton"
-
-#: keystone/token/providers/fernet/utils.py:38
-#, python-format
-msgid ""
-"Either [fernet_tokens] key_repository does not exist or Keystone does not "
-"have sufficient permission to access it: %s"
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:79
-msgid ""
-"Failed to create [fernet_tokens] key_repository: either it already exists or "
-"you don't have sufficient permissions to create it"
-msgstr ""
diff --git a/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-info.po b/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-info.po
index 065540dc..08cee0e0 100644
--- a/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-info.po
+++ b/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-info.po
@@ -5,38 +5,23 @@
# Translators:
# Bruno Cornec <bruno.cornec@hp.com>, 2014
# Maxime COQUEREL <max.coquerel@gmail.com>, 2014
-# Andrew_Melim <nokostya.translation@gmail.com>, 2014
+# Andrew Melim <nokostya.translation@gmail.com>, 2014
msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-03-09 06:03+0000\n"
-"PO-Revision-Date: 2015-03-08 17:01+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
+"PO-Revision-Date: 2015-08-01 06:26+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: French (http://www.transifex.com/projects/p/keystone/language/"
+"Language-Team: French (http://www.transifex.com/openstack/keystone/language/"
"fr/)\n"
"Language: fr\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
-#: keystone/assignment/core.py:250
-#, python-format
-msgid "Creating the default role %s because it does not exist."
-msgstr "Création du rôle par défaut %s, car il n'existe pas"
-
-#: keystone/assignment/core.py:258
-#, python-format
-msgid "Creating the default role %s failed because it was already created"
-msgstr ""
-
-#: keystone/auth/controllers.py:64
-msgid "Loading auth-plugins by class-name is deprecated."
-msgstr "Chargement de auth-plugins par class-name est déprécié"
-
-#: keystone/auth/controllers.py:106
#, python-format
msgid ""
"\"expires_at\" has conflicting values %(existing)s and %(new)s. Will use "
@@ -45,68 +30,41 @@ msgstr ""
"\"expires_at\" a des valeurs conflictuelles %(existing)s et %(new)s. "
"Utilsation de la première valeur."
-#: keystone/common/openssl.py:81
#, python-format
-msgid "Running command - %s"
-msgstr "Exécution de la commande %s"
-
-#: keystone/common/wsgi.py:79
-msgid "No bind information present in token"
-msgstr "Aucune information d'attachement n'est présente dans le jeton"
-
-#: keystone/common/wsgi.py:83
-#, python-format
-msgid "Named bind mode %s not in bind information"
-msgstr ""
-"Le mode d'attachement nommé %s n'est pas dans l'information d'attachement"
-
-#: keystone/common/wsgi.py:90
-msgid "Kerberos credentials required and not present"
-msgstr "L'identitification Kerberos est requise mais non présente"
-
-#: keystone/common/wsgi.py:94
-msgid "Kerberos credentials do not match those in bind"
-msgstr "L'identification Kerberos ne correspond pas à celle de l'attachement"
-
-#: keystone/common/wsgi.py:98
-msgid "Kerberos bind authentication successful"
-msgstr "Attachement Kerberos identifié correctement"
+msgid "Adding proxy '%(proxy)s' to KVS %(name)s."
+msgstr "Ajout du mandataire '%(proxy)s' au KVS %(name)s."
-#: keystone/common/wsgi.py:105
#, python-format
msgid "Couldn't verify unknown bind: {%(bind_type)s: %(identifier)s}"
msgstr ""
"Impossible de vérifier l'attachement inconnu: {%(bind_type)s: "
"%(identifier)s}"
-#: keystone/common/environment/eventlet_server.py:103
#, python-format
-msgid "Starting %(arg0)s on %(host)s:%(port)s"
-msgstr "Démarrage de %(arg0)s sur %(host)s:%(port)s"
+msgid "Creating the default role %s because it does not exist."
+msgstr "Création du rôle par défaut %s, car il n'existe pas"
-#: keystone/common/kvs/core.py:138
#, python-format
-msgid "Adding proxy '%(proxy)s' to KVS %(name)s."
-msgstr "Ahour du mandataire '%(proxy)s' au KVS %(name)s."
+msgid "KVS region %s key_mangler disabled."
+msgstr "Région KVS %s key_mangler désactivée"
-#: keystone/common/kvs/core.py:188
-#, python-format
-msgid "Using %(func)s as KVS region %(name)s key_mangler"
-msgstr "Utilise %(func)s comme région KVS %(name)s key_mangler"
+msgid "Kerberos bind authentication successful"
+msgstr "Attachement Kerberos identifié correctement"
+
+msgid "Kerberos credentials do not match those in bind"
+msgstr "L'identification Kerberos ne correspond pas à celle de l'attachement"
+
+msgid "Kerberos credentials required and not present"
+msgstr "L'identification Kerberos est requise mais non présente"
-#: keystone/common/kvs/core.py:200
#, python-format
-msgid "Using default dogpile sha1_mangle_key as KVS region %s key_mangler"
+msgid "Named bind mode %s not in bind information"
msgstr ""
-"Utilisation du dogpile sha1_mangle_key par défaut comme région KVS %s "
-"key_mangler"
+"Le mode d'attachement nommé %s n'est pas dans l'information d'attachement"
-#: keystone/common/kvs/core.py:210
-#, python-format
-msgid "KVS region %s key_mangler disabled."
-msgstr "Région KVS %s key_mangler désactivée"
+msgid "No bind information present in token"
+msgstr "Aucune information d'attachement n'est présente dans le jeton"
-#: keystone/contrib/example/core.py:64 keystone/contrib/example/core.py:73
#, python-format
msgid ""
"Received the following notification: service %(service)s, resource_type: "
@@ -115,109 +73,24 @@ msgstr ""
"Réception de la notification suivante: service %(service)s, resource_type: "
"%(resource_type)s, operation %(operation)s payload %(payload)s"
-#: keystone/openstack/common/eventlet_backdoor.py:146
-#, python-format
-msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
-msgstr "Eventlet backdoor en écoute sur le port %(port)s for process %(pid)d"
-
-#: keystone/openstack/common/service.py:173
-#, python-format
-msgid "Caught %s, exiting"
-msgstr "%s interceptée, sortie"
-
-#: keystone/openstack/common/service.py:231
-msgid "Parent process has died unexpectedly, exiting"
-msgstr "Processus parent arrêté de manière inattendue, sortie"
-
-#: keystone/openstack/common/service.py:262
-#, python-format
-msgid "Child caught %s, exiting"
-msgstr "L'enfant a reçu %s, sortie"
-
-#: keystone/openstack/common/service.py:301
-msgid "Forking too fast, sleeping"
-msgstr "Bifurcation trop rapide, pause"
-
-#: keystone/openstack/common/service.py:320
#, python-format
-msgid "Started child %d"
-msgstr "Enfant démarré %d"
-
-#: keystone/openstack/common/service.py:330
-#, python-format
-msgid "Starting %d workers"
-msgstr "Démarrage des travailleurs %d"
-
-#: keystone/openstack/common/service.py:347
-#, python-format
-msgid "Child %(pid)d killed by signal %(sig)d"
-msgstr "Enfant %(pid)d arrêté par le signal %(sig)d"
-
-#: keystone/openstack/common/service.py:351
-#, python-format
-msgid "Child %(pid)s exited with status %(code)d"
-msgstr "Processus fils %(pid)s terminé avec le status %(code)d"
-
-#: keystone/openstack/common/service.py:390
-#, python-format
-msgid "Caught %s, stopping children"
-msgstr "%s interceptée, arrêt de l'enfant"
-
-#: keystone/openstack/common/service.py:399
-msgid "Wait called after thread killed. Cleaning up."
-msgstr "Pause demandée après suppression de thread. Nettoyage."
+msgid "Running command - %s"
+msgstr "Exécution de la commande %s"
-#: keystone/openstack/common/service.py:415
#, python-format
-msgid "Waiting on %d children to exit"
-msgstr "En attente %d enfants pour sortie"
+msgid "Starting %(arg0)s on %(host)s:%(port)s"
+msgstr "Démarrage de %(arg0)s sur %(host)s:%(port)s"
-#: keystone/token/persistence/backends/sql.py:279
#, python-format
msgid "Total expired tokens removed: %d"
msgstr "Total des jetons expirés effacés: %d"
-#: keystone/token/providers/fernet/utils.py:72
-msgid ""
-"[fernet_tokens] key_repository does not appear to exist; attempting to "
-"create it"
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:130
-#, python-format
-msgid "Created a new key: %s"
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:143
-msgid "Key repository is already initialized; aborting."
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:179
-#, python-format
-msgid "Starting key rotation with %(count)s key files: %(list)s"
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:185
-#, python-format
-msgid "Current primary key is: %s"
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:187
-#, python-format
-msgid "Next primary key will be: %s"
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:197
-#, python-format
-msgid "Promoted key 0 to be the primary: %s"
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:213
#, python-format
-msgid "Excess keys to purge: %s"
-msgstr ""
+msgid "Using %(func)s as KVS region %(name)s key_mangler"
+msgstr "Utilise %(func)s comme région KVS %(name)s key_mangler"
-#: keystone/token/providers/fernet/utils.py:237
#, python-format
-msgid "Loaded %(count)s encryption keys from: %(dir)s"
+msgid "Using default dogpile sha1_mangle_key as KVS region %s key_mangler"
msgstr ""
+"Utilisation du dogpile sha1_mangle_key par défaut comme région KVS %s "
+"key_mangler"
diff --git a/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-warning.po b/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-warning.po
index a83b88a5..d2fddf29 100644
--- a/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-warning.po
+++ b/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-warning.po
@@ -9,142 +9,34 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-03-19 06:04+0000\n"
-"PO-Revision-Date: 2015-03-19 02:24+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
+"PO-Revision-Date: 2015-07-29 06:04+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: French (http://www.transifex.com/projects/p/keystone/language/"
+"Language-Team: French (http://www.transifex.com/openstack/keystone/language/"
"fr/)\n"
"Language: fr\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
-#: keystone/cli.py:159
-msgid "keystone-manage pki_setup is not recommended for production use."
-msgstr ""
-"keystone-manage pki_setup n'est pas recommandé pour une utilisation en "
-"production."
-
-#: keystone/cli.py:178
-msgid "keystone-manage ssl_setup is not recommended for production use."
-msgstr ""
-"keystone-manage ssl_setup n'est pas recommandé pour une utilisation en "
-"production."
-
-#: keystone/cli.py:493
-#, python-format
-msgid "Ignoring file (%s) while scanning domain config directory"
-msgstr ""
-
-#: keystone/exception.py:49
-msgid "missing exception kwargs (programmer error)"
-msgstr ""
-
-#: keystone/assignment/controllers.py:60
-#, python-format
-msgid "Authentication failed: %s"
-msgstr "L'authentification a échoué: %s"
-
-#: keystone/assignment/controllers.py:576
-#, python-format
-msgid ""
-"Group %(group)s not found for role-assignment - %(target)s with Role: "
-"%(role)s"
-msgstr ""
-
-#: keystone/auth/controllers.py:449
-#, python-format
-msgid ""
-"User %(user_id)s doesn't have access to default project %(project_id)s. The "
-"token will be unscoped rather than scoped to the project."
-msgstr ""
-
-#: keystone/auth/controllers.py:457
-#, python-format
-msgid ""
-"User %(user_id)s's default project %(project_id)s is disabled. The token "
-"will be unscoped rather than scoped to the project."
-msgstr ""
-
-#: keystone/auth/controllers.py:466
-#, python-format
-msgid ""
-"User %(user_id)s's default project %(project_id)s not found. The token will "
-"be unscoped rather than scoped to the project."
-msgstr ""
-
-#: keystone/common/authorization.py:55
-msgid "RBAC: Invalid user data in token"
-msgstr "RBAC: Donnée utilisation non valide dans le token"
-
-#: keystone/common/controller.py:79 keystone/middleware/core.py:224
-msgid "RBAC: Invalid token"
-msgstr "RBAC : Jeton non valide"
-
-#: keystone/common/controller.py:104 keystone/common/controller.py:201
-#: keystone/common/controller.py:740
-msgid "RBAC: Bypassing authorization"
-msgstr "RBAC : Autorisation ignorée"
-
-#: keystone/common/controller.py:669 keystone/common/controller.py:704
-msgid "Invalid token found while getting domain ID for list request"
-msgstr ""
-
-#: keystone/common/controller.py:677
-msgid "No domain information specified as part of list request"
-msgstr ""
-
-#: keystone/common/utils.py:103
-#, python-format
-msgid "Truncating user password to %d characters."
-msgstr ""
-
-#: keystone/common/wsgi.py:242
-#, python-format
-msgid "Authorization failed. %(exception)s from %(remote_addr)s"
-msgstr "Echec d'autorisation. %(exception)s depuis %(remote_addr)s"
-
-#: keystone/common/wsgi.py:361
-msgid "Invalid token in _get_trust_id_for_request"
-msgstr "Jeton invalide dans _get_trust_id_for_request"
-
-#: keystone/common/cache/backends/mongo.py:403
-#, python-format
-msgid ""
-"TTL index already exists on db collection <%(c_name)s>, remove index <"
-"%(indx_name)s> first to make updated mongo_ttl_seconds value to be effective"
-msgstr ""
-
-#: keystone/common/kvs/core.py:134
#, python-format
msgid "%s is not a dogpile.proxy.ProxyBackend"
msgstr "%s n'est pas un dogpile.proxy.ProxyBackend"
-#: keystone/common/kvs/core.py:403
#, python-format
-msgid "KVS lock released (timeout reached) for: %s"
-msgstr "Verrou KVS relaché (temps limite atteint) pour : %s"
-
-#: keystone/common/ldap/core.py:1026
-msgid ""
-"LDAP Server does not support paging. Disable paging in keystone.conf to "
-"avoid this message."
-msgstr ""
-"Le serveur LDAP ne prend pas en charge la pagination. Désactivez la "
-"pagination dans keystone.conf pour éviter de recevoir ce message."
+msgid "Authorization failed. %(exception)s from %(remote_addr)s"
+msgstr "Echec d'autorisation. %(exception)s depuis %(remote_addr)s"
-#: keystone/common/ldap/core.py:1225
#, python-format
msgid ""
-"Invalid additional attribute mapping: \"%s\". Format must be "
-"<ldap_attribute>:<keystone_attribute>"
+"Endpoint %(endpoint_id)s referenced in association for policy %(policy_id)s "
+"not found."
msgstr ""
-"Mauvais mappage d'attribut additionnel: \"%s\". Le format doit être "
-"<ldap_attribute>:<keystone_attribute>"
+"Le point d'entrée %(endpoint_id)s référencé en association avec la politique "
+"%(policy_id)s est introuvable."
-#: keystone/common/ldap/core.py:1336
#, python-format
msgid ""
"ID attribute %(id_attr)s for LDAP object %(dn)s has multiple values and "
@@ -154,150 +46,56 @@ msgstr ""
"par conséquent ne peut être utilisé comme un ID. Obtention de l'ID depuis le "
"DN à la place."
-#: keystone/common/ldap/core.py:1669
#, python-format
msgid ""
-"When deleting entries for %(search_base)s, could not delete nonexistent "
-"entries %(entries)s%(dots)s"
+"Invalid additional attribute mapping: \"%s\". Format must be "
+"<ldap_attribute>:<keystone_attribute>"
msgstr ""
+"Mauvais mappage d'attribut additionnel: \"%s\". Le format doit être "
+"<ldap_attribute>:<keystone_attribute>"
-#: keystone/contrib/endpoint_policy/core.py:91
#, python-format
-msgid ""
-"Endpoint %(endpoint_id)s referenced in association for policy %(policy_id)s "
-"not found."
-msgstr ""
-"Le point d'entrée %(endpoint_id)s référencé en association avec la politique "
-"%(policy_id)s est introuvable."
+msgid "Invalid domain name (%s) found in config file name"
+msgstr "Nom de domaine trouvé non valide (%s) dans le fichier de configuration"
-#: keystone/contrib/endpoint_policy/core.py:179
#, python-format
-msgid ""
-"Unsupported policy association found - Policy %(policy_id)s, Endpoint "
-"%(endpoint_id)s, Service %(service_id)s, Region %(region_id)s, "
-msgstr ""
+msgid "KVS lock released (timeout reached) for: %s"
+msgstr "Verrou KVS relâché (temps limite atteint) pour : %s"
-#: keystone/contrib/endpoint_policy/core.py:195
-#, python-format
msgid ""
-"Policy %(policy_id)s referenced in association for endpoint %(endpoint_id)s "
-"not found."
+"LDAP Server does not support paging. Disable paging in keystone.conf to "
+"avoid this message."
msgstr ""
+"Le serveur LDAP ne prend pas en charge la pagination. Désactivez la "
+"pagination dans keystone.conf pour éviter de recevoir ce message."
-#: keystone/contrib/federation/utils.py:200
-#, python-format
-msgid "Impossible to identify the IdP %s "
-msgstr ""
+msgid "RBAC: Bypassing authorization"
+msgstr "RBAC : Autorisation ignorée"
-#: keystone/contrib/federation/utils.py:523
-msgid "Ignoring user name"
-msgstr ""
+msgid "RBAC: Invalid token"
+msgstr "RBAC : Jeton non valide"
-#: keystone/identity/controllers.py:139
-#, python-format
-msgid "Unable to remove user %(user)s from %(tenant)s."
-msgstr "Impossible de supprimer l'utilisateur %(user)s depuis %(tenant)s."
+msgid "RBAC: Invalid user data in token"
+msgstr "RBAC: Donnée utilisateur non valide dans le token"
-#: keystone/identity/controllers.py:158
#, python-format
msgid "Unable to add user %(user)s to %(tenant)s."
msgstr "Impossible d'ajouter l'utilisateur %(user)s à %(tenant)s."
-#: keystone/identity/core.py:122
-#, python-format
-msgid "Invalid domain name (%s) found in config file name"
-msgstr "Non de domaine trouvé non valide (%s) dans le fichier de configuration"
-
-#: keystone/identity/core.py:160
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Impossible de localiser le répertoire de configuration domaine: %s"
-#: keystone/middleware/core.py:149
-msgid ""
-"XML support has been removed as of the Kilo release and should not be "
-"referenced or used in deployment. Please remove references to "
-"XmlBodyMiddleware from your configuration. This compatibility stub will be "
-"removed in the L release"
-msgstr ""
-
-#: keystone/middleware/core.py:234
-msgid "Auth context already exists in the request environment"
-msgstr ""
-
-#: keystone/openstack/common/loopingcall.py:87
-#, python-format
-msgid "task %(func_name)r run outlasted interval by %(delay).2f sec"
-msgstr ""
-
-#: keystone/openstack/common/service.py:351
#, python-format
-msgid "pid %d not in child list"
-msgstr "PID %d absent de la liste d'enfants"
-
-#: keystone/resource/core.py:1214
-#, python-format
-msgid ""
-"Found what looks like an unmatched config option substitution reference - "
-"domain: %(domain)s, group: %(group)s, option: %(option)s, value: %(value)s. "
-"Perhaps the config option to which it refers has yet to be added?"
-msgstr ""
-
-#: keystone/resource/core.py:1221
-#, python-format
-msgid ""
-"Found what looks like an incorrectly constructed config option substitution "
-"reference - domain: %(domain)s, group: %(group)s, option: %(option)s, value: "
-"%(value)s."
-msgstr ""
-
-#: keystone/token/persistence/core.py:228
-#, python-format
-msgid ""
-"`token_api.%s` is deprecated as of Juno in favor of utilizing methods on "
-"`token_provider_api` and may be removed in Kilo."
-msgstr ""
-
-#: keystone/token/persistence/backends/kvs.py:57
-msgid ""
-"It is recommended to only use the base key-value-store implementation for "
-"the token driver for testing purposes. Please use keystone.token.persistence."
-"backends.memcache.Token or keystone.token.persistence.backends.sql.Token "
-"instead."
-msgstr ""
-
-#: keystone/token/persistence/backends/kvs.py:206
-#, python-format
-msgid "Token `%s` is expired, not adding to the revocation list."
-msgstr ""
-
-#: keystone/token/persistence/backends/kvs.py:240
-#, python-format
-msgid ""
-"Removing `%s` from revocation list due to invalid expires data in revocation "
-"list."
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:46
-#, python-format
-msgid "[fernet_tokens] key_repository is world readable: %s"
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:90
-#, python-format
-msgid ""
-"Unable to change the ownership of [fernet_tokens] key_repository without a "
-"keystone user ID and keystone group ID both being provided: %s"
-msgstr ""
+msgid "Unable to remove user %(user)s from %(tenant)s."
+msgstr "Impossible de supprimer l'utilisateur %(user)s depuis %(tenant)s."
-#: keystone/token/providers/fernet/utils.py:112
-#, python-format
-msgid ""
-"Unable to change the ownership of the new key without a keystone user ID and "
-"keystone group ID both being provided: %s"
+msgid "keystone-manage pki_setup is not recommended for production use."
msgstr ""
+"keystone-manage pki_setup n'est pas recommandé pour une utilisation en "
+"production."
-#: keystone/token/providers/fernet/utils.py:204
-msgid ""
-"[fernet_tokens] max_active_keys must be at least 1 to maintain a primary key."
+msgid "keystone-manage ssl_setup is not recommended for production use."
msgstr ""
+"keystone-manage ssl_setup n'est pas recommandé pour une utilisation en "
+"production."
diff --git a/keystone-moon/keystone/locale/hu/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/hu/LC_MESSAGES/keystone-log-critical.po
index 767c150e..102329f6 100644
--- a/keystone-moon/keystone/locale/hu/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/hu/LC_MESSAGES/keystone-log-critical.po
@@ -1,5 +1,5 @@
# Translations template for keystone.
-# Copyright (C) 2014 OpenStack Foundation
+# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
# Translators:
@@ -7,19 +7,18 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
"PO-Revision-Date: 2014-08-31 15:19+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Hungarian (http://www.transifex.com/projects/p/keystone/"
+"Language-Team: Hungarian (http://www.transifex.com/openstack/keystone/"
"language/hu/)\n"
"Language: hu\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
-#: keystone/catalog/backends/templated.py:106
#, python-format
msgid "Unable to open template file %s"
msgstr "Nem nyitható meg a sablonfájl: %s"
diff --git a/keystone-moon/keystone/locale/it/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/it/LC_MESSAGES/keystone-log-critical.po
index 35010103..db15042f 100644
--- a/keystone-moon/keystone/locale/it/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/it/LC_MESSAGES/keystone-log-critical.po
@@ -1,5 +1,5 @@
# Translations template for keystone.
-# Copyright (C) 2014 OpenStack Foundation
+# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
# Translators:
@@ -7,19 +7,18 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
"PO-Revision-Date: 2014-08-31 15:19+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Italian (http://www.transifex.com/projects/p/keystone/"
-"language/it/)\n"
+"Language-Team: Italian (http://www.transifex.com/openstack/keystone/language/"
+"it/)\n"
"Language: it\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
-#: keystone/catalog/backends/templated.py:106
#, python-format
msgid "Unable to open template file %s"
msgstr "Impossibile aprire il file di template %s"
diff --git a/keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone-log-critical.po
index b83aaad2..e5ec3075 100644
--- a/keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone-log-critical.po
@@ -1,5 +1,5 @@
# Translations template for keystone.
-# Copyright (C) 2014 OpenStack Foundation
+# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
# Translators:
@@ -7,19 +7,18 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
"PO-Revision-Date: 2014-08-31 15:19+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Japanese (http://www.transifex.com/projects/p/keystone/"
+"Language-Team: Japanese (http://www.transifex.com/openstack/keystone/"
"language/ja/)\n"
"Language: ja\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
"Plural-Forms: nplurals=1; plural=0;\n"
-#: keystone/catalog/backends/templated.py:106
#, python-format
msgid "Unable to open template file %s"
msgstr "テンプレートファイル %s を開けません"
diff --git a/keystone-moon/keystone/locale/keystone-log-critical.pot b/keystone-moon/keystone/locale/keystone-log-critical.pot
index e07dd7a9..e6a96bf1 100644
--- a/keystone-moon/keystone/locale/keystone-log-critical.pot
+++ b/keystone-moon/keystone/locale/keystone-log-critical.pot
@@ -1,21 +1,21 @@
# Translations template for keystone.
-# Copyright (C) 2014 OpenStack Foundation
+# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2014.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2015.
#
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: keystone 2014.2.dev28.g7e410ae\n"
+"Project-Id-Version: keystone 8.0.0.0b3.dev14\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"POT-Creation-Date: 2015-08-01 06:07+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
#: keystone/catalog/backends/templated.py:106
#, python-format
diff --git a/keystone-moon/keystone/locale/keystone-log-error.pot b/keystone-moon/keystone/locale/keystone-log-error.pot
index bca25a19..375fb4b8 100644
--- a/keystone-moon/keystone/locale/keystone-log-error.pot
+++ b/keystone-moon/keystone/locale/keystone-log-error.pot
@@ -6,49 +6,49 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: keystone 2015.1.dev362\n"
+"Project-Id-Version: keystone 8.0.0.0b3.dev14\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-03-09 06:03+0000\n"
+"POT-Creation-Date: 2015-08-01 06:07+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
-#: keystone/notifications.py:304
+#: keystone/notifications.py:396
msgid "Failed to construct notifier"
msgstr ""
-#: keystone/notifications.py:389
+#: keystone/notifications.py:491
#, python-format
msgid "Failed to send %(res_id)s %(event_type)s notification"
msgstr ""
-#: keystone/notifications.py:606
+#: keystone/notifications.py:760
#, python-format
msgid "Failed to send %(action)s %(event_type)s notification"
msgstr ""
-#: keystone/catalog/core.py:62
+#: keystone/catalog/core.py:63
#, python-format
msgid "Malformed endpoint - %(url)r is not a string"
msgstr ""
-#: keystone/catalog/core.py:66
+#: keystone/catalog/core.py:68
#, python-format
msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s"
msgstr ""
-#: keystone/catalog/core.py:71
+#: keystone/catalog/core.py:76
#, python-format
msgid ""
"Malformed endpoint '%(url)s'. The following type error occurred during "
"string substitution: %(typeerror)s"
msgstr ""
-#: keystone/catalog/core.py:77
+#: keystone/catalog/core.py:82
#, python-format
msgid ""
"Malformed endpoint %s - incomplete format (are you missing a type "
@@ -65,7 +65,7 @@ msgstr ""
msgid "Failed to remove file %(file_path)r: %(error)s"
msgstr ""
-#: keystone/common/utils.py:239
+#: keystone/common/utils.py:241
msgid ""
"Error setting up the debug environment. Verify that the option --debug-"
"url has the format <host>:<port> and that a debugger processes is "
@@ -79,24 +79,16 @@ msgid ""
"Skipping unknown format: %s"
msgstr ""
-#: keystone/common/environment/eventlet_server.py:99
+#: keystone/common/environment/eventlet_server.py:112
#, python-format
msgid "Could not bind to %(host)s:%(port)s"
msgstr ""
-#: keystone/common/environment/eventlet_server.py:185
+#: keystone/common/environment/eventlet_server.py:205
msgid "Server error"
msgstr ""
-#: keystone/contrib/endpoint_policy/core.py:129
-#: keystone/contrib/endpoint_policy/core.py:228
-#, python-format
-msgid ""
-"Circular reference or a repeated entry found in region tree - "
-"%(region_id)s."
-msgstr ""
-
-#: keystone/contrib/federation/idp.py:410
+#: keystone/contrib/federation/idp.py:428
#, python-format
msgid "Error when signing assertion, reason: %(reason)s"
msgstr ""
@@ -105,45 +97,40 @@ msgstr ""
msgid "Cannot retrieve Authorization headers"
msgstr ""
-#: keystone/openstack/common/loopingcall.py:95
-msgid "in fixed duration looping call"
-msgstr ""
-
-#: keystone/openstack/common/loopingcall.py:138
-msgid "in dynamic looping call"
-msgstr ""
-
-#: keystone/openstack/common/service.py:268
-msgid "Unhandled exception"
+#: keystone/endpoint_policy/core.py:132 keystone/endpoint_policy/core.py:231
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in region tree - "
+"%(region_id)s."
msgstr ""
-#: keystone/resource/core.py:477
+#: keystone/resource/core.py:485
#, python-format
msgid ""
"Circular reference or a repeated entry found projects hierarchy - "
"%(project_id)s."
msgstr ""
-#: keystone/resource/core.py:939
+#: keystone/resource/core.py:950
#, python-format
msgid ""
"Unexpected results in response for domain config - %(count)s responses, "
"first option is %(option)s, expected option %(expected)s"
msgstr ""
-#: keystone/resource/backends/sql.py:102 keystone/resource/backends/sql.py:121
+#: keystone/resource/backends/sql.py:101 keystone/resource/backends/sql.py:120
#, python-format
msgid ""
"Circular reference or a repeated entry found in projects hierarchy - "
"%(project_id)s."
msgstr ""
-#: keystone/token/provider.py:292
+#: keystone/token/provider.py:284
#, python-format
msgid "Unexpected error or malformed token determining token expiry: %s"
msgstr ""
-#: keystone/token/persistence/backends/kvs.py:226
+#: keystone/token/persistence/backends/kvs.py:225
#, python-format
msgid ""
"Reinitializing revocation list due to error in loading revocation list "
@@ -151,7 +138,7 @@ msgid ""
"data: %(list)r"
msgstr ""
-#: keystone/token/providers/common.py:611
+#: keystone/token/providers/common.py:678
msgid "Failed to validate token"
msgstr ""
@@ -166,6 +153,11 @@ msgid ""
" have sufficient permission to access it: %s"
msgstr ""
+#: keystone/token/providers/fernet/utils.py:62
+#, python-format
+msgid "Unable to convert Keystone user or group ID. Error: %s"
+msgstr ""
+
#: keystone/token/providers/fernet/utils.py:79
msgid ""
"Failed to create [fernet_tokens] key_repository: either it already exists"
diff --git a/keystone-moon/keystone/locale/keystone-log-info.pot b/keystone-moon/keystone/locale/keystone-log-info.pot
index 17abd1df..f4c52cd4 100644
--- a/keystone-moon/keystone/locale/keystone-log-info.pot
+++ b/keystone-moon/keystone/locale/keystone-log-info.pot
@@ -6,16 +6,16 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: keystone 2015.1.dev362\n"
+"Project-Id-Version: keystone 8.0.0.0b3.dev45\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-03-09 06:03+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
#: keystone/assignment/core.py:250
#, python-format
@@ -27,11 +27,7 @@ msgstr ""
msgid "Creating the default role %s failed because it was already created"
msgstr ""
-#: keystone/auth/controllers.py:64
-msgid "Loading auth-plugins by class-name is deprecated."
-msgstr ""
-
-#: keystone/auth/controllers.py:106
+#: keystone/auth/controllers.py:109
#, python-format
msgid ""
"\"expires_at\" has conflicting values %(existing)s and %(new)s. Will use"
@@ -43,124 +39,74 @@ msgstr ""
msgid "Running command - %s"
msgstr ""
-#: keystone/common/wsgi.py:79
+#: keystone/common/wsgi.py:82
msgid "No bind information present in token"
msgstr ""
-#: keystone/common/wsgi.py:83
+#: keystone/common/wsgi.py:86
#, python-format
msgid "Named bind mode %s not in bind information"
msgstr ""
-#: keystone/common/wsgi.py:90
+#: keystone/common/wsgi.py:93
msgid "Kerberos credentials required and not present"
msgstr ""
-#: keystone/common/wsgi.py:94
+#: keystone/common/wsgi.py:97
msgid "Kerberos credentials do not match those in bind"
msgstr ""
-#: keystone/common/wsgi.py:98
+#: keystone/common/wsgi.py:101
msgid "Kerberos bind authentication successful"
msgstr ""
-#: keystone/common/wsgi.py:105
+#: keystone/common/wsgi.py:108
#, python-format
msgid "Couldn't verify unknown bind: {%(bind_type)s: %(identifier)s}"
msgstr ""
-#: keystone/common/environment/eventlet_server.py:103
+#: keystone/common/environment/eventlet_server.py:116
#, python-format
msgid "Starting %(arg0)s on %(host)s:%(port)s"
msgstr ""
-#: keystone/common/kvs/core.py:138
+#: keystone/common/kvs/core.py:137
#, python-format
msgid "Adding proxy '%(proxy)s' to KVS %(name)s."
msgstr ""
-#: keystone/common/kvs/core.py:188
+#: keystone/common/kvs/core.py:187
#, python-format
msgid "Using %(func)s as KVS region %(name)s key_mangler"
msgstr ""
-#: keystone/common/kvs/core.py:200
+#: keystone/common/kvs/core.py:199
#, python-format
msgid "Using default dogpile sha1_mangle_key as KVS region %s key_mangler"
msgstr ""
-#: keystone/common/kvs/core.py:210
+#: keystone/common/kvs/core.py:209
#, python-format
msgid "KVS region %s key_mangler disabled."
msgstr ""
-#: keystone/contrib/example/core.py:64 keystone/contrib/example/core.py:73
+#: keystone/contrib/example/core.py:69 keystone/contrib/example/core.py:78
#, python-format
msgid ""
"Received the following notification: service %(service)s, resource_type: "
"%(resource_type)s, operation %(operation)s payload %(payload)s"
msgstr ""
-#: keystone/openstack/common/eventlet_backdoor.py:146
-#, python-format
-msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
-msgstr ""
-
-#: keystone/openstack/common/service.py:173
-#, python-format
-msgid "Caught %s, exiting"
-msgstr ""
-
-#: keystone/openstack/common/service.py:231
-msgid "Parent process has died unexpectedly, exiting"
-msgstr ""
-
-#: keystone/openstack/common/service.py:262
-#, python-format
-msgid "Child caught %s, exiting"
-msgstr ""
-
-#: keystone/openstack/common/service.py:301
-msgid "Forking too fast, sleeping"
-msgstr ""
-
-#: keystone/openstack/common/service.py:320
-#, python-format
-msgid "Started child %d"
-msgstr ""
-
-#: keystone/openstack/common/service.py:330
+#: keystone/token/persistence/backends/sql.py:283
#, python-format
-msgid "Starting %d workers"
-msgstr ""
-
-#: keystone/openstack/common/service.py:347
-#, python-format
-msgid "Child %(pid)d killed by signal %(sig)d"
-msgstr ""
-
-#: keystone/openstack/common/service.py:351
-#, python-format
-msgid "Child %(pid)s exited with status %(code)d"
-msgstr ""
-
-#: keystone/openstack/common/service.py:390
-#, python-format
-msgid "Caught %s, stopping children"
-msgstr ""
-
-#: keystone/openstack/common/service.py:399
-msgid "Wait called after thread killed. Cleaning up."
-msgstr ""
-
-#: keystone/openstack/common/service.py:415
-#, python-format
-msgid "Waiting on %d children to exit"
+msgid "Total expired tokens removed: %d"
msgstr ""
-#: keystone/token/persistence/backends/sql.py:279
+#: keystone/token/providers/fernet/token_formatters.py:163
#, python-format
-msgid "Total expired tokens removed: %d"
+msgid ""
+"Fernet token created with length of %d characters, which exceeds 255 "
+"characters"
msgstr ""
#: keystone/token/providers/fernet/utils.py:72
@@ -178,33 +124,33 @@ msgstr ""
msgid "Key repository is already initialized; aborting."
msgstr ""
-#: keystone/token/providers/fernet/utils.py:179
+#: keystone/token/providers/fernet/utils.py:184
#, python-format
msgid "Starting key rotation with %(count)s key files: %(list)s"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:185
+#: keystone/token/providers/fernet/utils.py:190
#, python-format
msgid "Current primary key is: %s"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:187
+#: keystone/token/providers/fernet/utils.py:192
#, python-format
msgid "Next primary key will be: %s"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:197
+#: keystone/token/providers/fernet/utils.py:202
#, python-format
msgid "Promoted key 0 to be the primary: %s"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:213
+#: keystone/token/providers/fernet/utils.py:223
#, python-format
-msgid "Excess keys to purge: %s"
+msgid "Excess key to purge: %s"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:237
+#: keystone/token/providers/fernet/utils.py:257
#, python-format
-msgid "Loaded %(count)s encryption keys from: %(dir)s"
+msgid "Loaded %(count)d encryption keys (max_active_keys=%(max)d) from: %(dir)s"
msgstr ""
diff --git a/keystone-moon/keystone/locale/keystone-log-warning.pot b/keystone-moon/keystone/locale/keystone-log-warning.pot
index ddf2931c..1109bcbe 100644
--- a/keystone-moon/keystone/locale/keystone-log-warning.pot
+++ b/keystone-moon/keystone/locale/keystone-log-warning.pot
@@ -6,103 +6,91 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: keystone 2015.1.dev497\n"
+"Project-Id-Version: keystone 8.0.0.0b3.dev122\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-03-19 06:04+0000\n"
+"POT-Creation-Date: 2015-08-16 06:06+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
-#: keystone/cli.py:159
-msgid "keystone-manage pki_setup is not recommended for production use."
-msgstr ""
-
-#: keystone/cli.py:178
-msgid "keystone-manage ssl_setup is not recommended for production use."
-msgstr ""
-
-#: keystone/cli.py:493
-#, python-format
-msgid "Ignoring file (%s) while scanning domain config directory"
-msgstr ""
-
-#: keystone/exception.py:49
+#: keystone/exception.py:48
msgid "missing exception kwargs (programmer error)"
msgstr ""
-#: keystone/assignment/controllers.py:60
-#, python-format
-msgid "Authentication failed: %s"
-msgstr ""
-
-#: keystone/assignment/controllers.py:576
-#, python-format
-msgid ""
-"Group %(group)s not found for role-assignment - %(target)s with Role: "
-"%(role)s"
-msgstr ""
-
-#: keystone/auth/controllers.py:449
+#: keystone/auth/controllers.py:446
#, python-format
msgid ""
"User %(user_id)s doesn't have access to default project %(project_id)s. "
"The token will be unscoped rather than scoped to the project."
msgstr ""
-#: keystone/auth/controllers.py:457
+#: keystone/auth/controllers.py:454
#, python-format
msgid ""
"User %(user_id)s's default project %(project_id)s is disabled. The token "
"will be unscoped rather than scoped to the project."
msgstr ""
-#: keystone/auth/controllers.py:466
+#: keystone/auth/controllers.py:463
#, python-format
msgid ""
"User %(user_id)s's default project %(project_id)s not found. The token "
"will be unscoped rather than scoped to the project."
msgstr ""
+#: keystone/cmd/cli.py:158
+msgid "keystone-manage pki_setup is not recommended for production use."
+msgstr ""
+
+#: keystone/cmd/cli.py:177
+msgid "keystone-manage ssl_setup is not recommended for production use."
+msgstr ""
+
+#: keystone/cmd/cli.py:483
+#, python-format
+msgid "Ignoring file (%s) while scanning domain config directory"
+msgstr ""
+
#: keystone/common/authorization.py:55
msgid "RBAC: Invalid user data in token"
msgstr ""
-#: keystone/common/controller.py:79 keystone/middleware/core.py:224
+#: keystone/common/controller.py:83 keystone/middleware/core.py:194
msgid "RBAC: Invalid token"
msgstr ""
-#: keystone/common/controller.py:104 keystone/common/controller.py:201
-#: keystone/common/controller.py:740
+#: keystone/common/controller.py:108 keystone/common/controller.py:205
+#: keystone/common/controller.py:755
msgid "RBAC: Bypassing authorization"
msgstr ""
-#: keystone/common/controller.py:669 keystone/common/controller.py:704
-msgid "Invalid token found while getting domain ID for list request"
+#: keystone/common/controller.py:710
+msgid "No domain information specified as part of list request"
msgstr ""
-#: keystone/common/controller.py:677
-msgid "No domain information specified as part of list request"
+#: keystone/common/openssl.py:73
+msgid "Failed to invoke ``openssl version``, assuming is v1.0 or newer"
msgstr ""
-#: keystone/common/utils.py:103
+#: keystone/common/utils.py:105
#, python-format
msgid "Truncating user password to %d characters."
msgstr ""
-#: keystone/common/wsgi.py:242
-#, python-format
-msgid "Authorization failed. %(exception)s from %(remote_addr)s"
+#: keystone/common/utils.py:527
+msgid "Couldn't find the auth context."
msgstr ""
-#: keystone/common/wsgi.py:361
-msgid "Invalid token in _get_trust_id_for_request"
+#: keystone/common/wsgi.py:243
+#, python-format
+msgid "Authorization failed. %(exception)s from %(remote_addr)s"
msgstr ""
-#: keystone/common/cache/backends/mongo.py:403
+#: keystone/common/cache/backends/mongo.py:407
#, python-format
msgid ""
"TTL index already exists on db collection <%(c_name)s>, remove index "
@@ -110,79 +98,74 @@ msgid ""
"effective"
msgstr ""
-#: keystone/common/kvs/core.py:134
+#: keystone/common/kvs/core.py:133
#, python-format
msgid "%s is not a dogpile.proxy.ProxyBackend"
msgstr ""
-#: keystone/common/kvs/core.py:403
+#: keystone/common/kvs/core.py:402
#, python-format
msgid "KVS lock released (timeout reached) for: %s"
msgstr ""
-#: keystone/common/ldap/core.py:1026
+#: keystone/common/ldap/core.py:1029
msgid ""
"LDAP Server does not support paging. Disable paging in keystone.conf to "
"avoid this message."
msgstr ""
-#: keystone/common/ldap/core.py:1225
+#: keystone/common/ldap/core.py:1224
#, python-format
msgid ""
"Invalid additional attribute mapping: \"%s\". Format must be "
"<ldap_attribute>:<keystone_attribute>"
msgstr ""
-#: keystone/common/ldap/core.py:1336
+#: keystone/common/ldap/core.py:1335
#, python-format
msgid ""
"ID attribute %(id_attr)s for LDAP object %(dn)s has multiple values and "
"therefore cannot be used as an ID. Will get the ID from DN instead"
msgstr ""
-#: keystone/common/ldap/core.py:1669
+#: keystone/common/ldap/core.py:1668
#, python-format
msgid ""
"When deleting entries for %(search_base)s, could not delete nonexistent "
"entries %(entries)s%(dots)s"
msgstr ""
-#: keystone/contrib/endpoint_policy/core.py:91
+#: keystone/contrib/federation/utils.py:545
+msgid "Ignoring user name"
+msgstr ""
+
+#: keystone/endpoint_policy/core.py:94
#, python-format
msgid ""
"Endpoint %(endpoint_id)s referenced in association for policy "
"%(policy_id)s not found."
msgstr ""
-#: keystone/contrib/endpoint_policy/core.py:179
+#: keystone/endpoint_policy/core.py:182
#, python-format
msgid ""
"Unsupported policy association found - Policy %(policy_id)s, Endpoint "
"%(endpoint_id)s, Service %(service_id)s, Region %(region_id)s, "
msgstr ""
-#: keystone/contrib/endpoint_policy/core.py:195
+#: keystone/endpoint_policy/core.py:198
#, python-format
msgid ""
"Policy %(policy_id)s referenced in association for endpoint "
"%(endpoint_id)s not found."
msgstr ""
-#: keystone/contrib/federation/utils.py:200
-#, python-format
-msgid "Impossible to identify the IdP %s "
-msgstr ""
-
-#: keystone/contrib/federation/utils.py:523
-msgid "Ignoring user name"
-msgstr ""
-
-#: keystone/identity/controllers.py:139
+#: keystone/identity/controllers.py:141
#, python-format
msgid "Unable to remove user %(user)s from %(tenant)s."
msgstr ""
-#: keystone/identity/controllers.py:158
+#: keystone/identity/controllers.py:160
#, python-format
msgid "Unable to add user %(user)s to %(tenant)s."
msgstr ""
@@ -197,29 +180,18 @@ msgstr ""
msgid "Unable to locate domain config directory: %s"
msgstr ""
-#: keystone/middleware/core.py:149
+#: keystone/identity/core.py:602
+#, python-format
msgid ""
-"XML support has been removed as of the Kilo release and should not be "
-"referenced or used in deployment. Please remove references to "
-"XmlBodyMiddleware from your configuration. This compatibility stub will "
-"be removed in the L release"
+"Found multiple domains being mapped to a driver that does not support "
+"that (e.g. LDAP) - Domain ID: %(domain)s, Default Driver: %(driver)s"
msgstr ""
-#: keystone/middleware/core.py:234
+#: keystone/middleware/core.py:204
msgid "Auth context already exists in the request environment"
msgstr ""
-#: keystone/openstack/common/loopingcall.py:87
-#, python-format
-msgid "task %(func_name)r run outlasted interval by %(delay).2f sec"
-msgstr ""
-
-#: keystone/openstack/common/service.py:351
-#, python-format
-msgid "pid %d not in child list"
-msgstr ""
-
-#: keystone/resource/core.py:1214
+#: keystone/resource/core.py:1237
#, python-format
msgid ""
"Found what looks like an unmatched config option substitution reference -"
@@ -228,7 +200,7 @@ msgid ""
"added?"
msgstr ""
-#: keystone/resource/core.py:1221
+#: keystone/resource/core.py:1244
#, python-format
msgid ""
"Found what looks like an incorrectly constructed config option "
@@ -236,27 +208,26 @@ msgid ""
"%(option)s, value: %(value)s."
msgstr ""
-#: keystone/token/persistence/core.py:228
+#: keystone/token/persistence/core.py:225
#, python-format
msgid ""
"`token_api.%s` is deprecated as of Juno in favor of utilizing methods on "
"`token_provider_api` and may be removed in Kilo."
msgstr ""
-#: keystone/token/persistence/backends/kvs.py:57
+#: keystone/token/persistence/backends/kvs.py:58
msgid ""
"It is recommended to only use the base key-value-store implementation for"
-" the token driver for testing purposes. Please use "
-"keystone.token.persistence.backends.memcache.Token or "
-"keystone.token.persistence.backends.sql.Token instead."
+" the token driver for testing purposes. Please use 'memcache' or 'sql' "
+"instead."
msgstr ""
-#: keystone/token/persistence/backends/kvs.py:206
+#: keystone/token/persistence/backends/kvs.py:205
#, python-format
msgid "Token `%s` is expired, not adding to the revocation list."
msgstr ""
-#: keystone/token/persistence/backends/kvs.py:240
+#: keystone/token/persistence/backends/kvs.py:239
#, python-format
msgid ""
"Removing `%s` from revocation list due to invalid expires data in "
@@ -282,7 +253,7 @@ msgid ""
"and keystone group ID both being provided: %s"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:204
+#: keystone/token/providers/fernet/utils.py:210
msgid ""
"[fernet_tokens] max_active_keys must be at least 1 to maintain a primary "
"key."
diff --git a/keystone-moon/keystone/locale/keystone.pot b/keystone-moon/keystone/locale/keystone.pot
index df46fa72..315891aa 100644
--- a/keystone-moon/keystone/locale/keystone.pot
+++ b/keystone-moon/keystone/locale/keystone.pot
@@ -6,97 +6,18 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: keystone 2015.1.dev497\n"
+"Project-Id-Version: keystone 8.0.0.0b3.dev122\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-03-19 06:03+0000\n"
+"POT-Creation-Date: 2015-08-16 06:06+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
-#: keystone/clean.py:24
-#, python-format
-msgid "%s cannot be empty."
-msgstr ""
-
-#: keystone/clean.py:26
-#, python-format
-msgid "%(property_name)s cannot be less than %(min_length)s characters."
-msgstr ""
-
-#: keystone/clean.py:31
-#, python-format
-msgid "%(property_name)s should not be greater than %(max_length)s characters."
-msgstr ""
-
-#: keystone/clean.py:40
-#, python-format
-msgid "%(property_name)s is not a %(display_expected_type)s"
-msgstr ""
-
-#: keystone/cli.py:283
-msgid "At least one option must be provided"
-msgstr ""
-
-#: keystone/cli.py:290
-msgid "--all option cannot be mixed with other options"
-msgstr ""
-
-#: keystone/cli.py:301
-#, python-format
-msgid "Unknown domain '%(name)s' specified by --domain-name"
-msgstr ""
-
-#: keystone/cli.py:365 keystone/tests/unit/test_cli.py:213
-msgid "At least one option must be provided, use either --all or --domain-name"
-msgstr ""
-
-#: keystone/cli.py:371 keystone/tests/unit/test_cli.py:229
-msgid "The --all option cannot be used with the --domain-name option"
-msgstr ""
-
-#: keystone/cli.py:397 keystone/tests/unit/test_cli.py:246
-#, python-format
-msgid ""
-"Invalid domain name: %(domain)s found in config file name: %(file)s - "
-"ignoring this file."
-msgstr ""
-
-#: keystone/cli.py:405 keystone/tests/unit/test_cli.py:187
-#, python-format
-msgid ""
-"Domain: %(domain)s already has a configuration defined - ignoring file: "
-"%(file)s."
-msgstr ""
-
-#: keystone/cli.py:419
-#, python-format
-msgid "Error parsing configuration file for domain: %(domain)s, file: %(file)s."
-msgstr ""
-
-#: keystone/cli.py:452
-#, python-format
-msgid ""
-"To get a more detailed information on this error, re-run this command for"
-" the specific domain, i.e.: keystone-manage domain_config_upload "
-"--domain-name %s"
-msgstr ""
-
-#: keystone/cli.py:470
-#, python-format
-msgid "Unable to locate domain config directory: %s"
-msgstr ""
-
-#: keystone/cli.py:503
-msgid ""
-"Unable to access the keystone database, please check it is configured "
-"correctly."
-msgstr ""
-
-#: keystone/exception.py:79
+#: keystone/exception.py:78
#, python-format
msgid ""
"Expecting to find %(attribute)s in %(target)s - the server could not "
@@ -104,26 +25,38 @@ msgid ""
"incorrect. The client is assumed to be in error."
msgstr ""
-#: keystone/exception.py:90
+#: keystone/exception.py:87
#, python-format
-msgid "%(detail)s"
+msgid "Cannot create an endpoint with an invalid URL: %(url)s"
msgstr ""
#: keystone/exception.py:94
+#, python-format
+msgid "%(detail)s"
+msgstr ""
+
+#: keystone/exception.py:98
msgid ""
"Timestamp not in expected format. The server could not comply with the "
"request since it is either malformed or otherwise incorrect. The client "
"is assumed to be in error."
msgstr ""
-#: keystone/exception.py:103
+#: keystone/exception.py:107
+msgid ""
+"The 'expires_at' must not be before now. The server could not comply with"
+" the request since it is either malformed or otherwise incorrect. The "
+"client is assumed to be in error."
+msgstr ""
+
+#: keystone/exception.py:116
#, python-format
msgid ""
"String length exceeded.The length of string '%(string)s' exceeded the "
"limit of column %(type)s(CHAR(%(length)d))."
msgstr ""
-#: keystone/exception.py:109
+#: keystone/exception.py:122
#, python-format
msgid ""
"Request attribute %(attribute)s must be less than or equal to %(size)i. "
@@ -131,88 +64,88 @@ msgid ""
"is invalid (too large). The client is assumed to be in error."
msgstr ""
-#: keystone/exception.py:119
+#: keystone/exception.py:132
#, python-format
msgid ""
"The specified parent region %(parent_region_id)s would create a circular "
"region hierarchy."
msgstr ""
-#: keystone/exception.py:126
+#: keystone/exception.py:139
#, python-format
msgid ""
"The password length must be less than or equal to %(size)i. The server "
"could not comply with the request because the password is invalid."
msgstr ""
-#: keystone/exception.py:134
+#: keystone/exception.py:147
#, python-format
msgid ""
"Unable to delete region %(region_id)s because it or its child regions "
"have associated endpoints."
msgstr ""
-#: keystone/exception.py:141
+#: keystone/exception.py:154
msgid ""
"The certificates you requested are not available. It is likely that this "
"server does not use PKI tokens otherwise this is the result of "
"misconfiguration."
msgstr ""
-#: keystone/exception.py:150
+#: keystone/exception.py:163
msgid "(Disable debug mode to suppress these details.)"
msgstr ""
-#: keystone/exception.py:155
+#: keystone/exception.py:168
#, python-format
msgid "%(message)s %(amendment)s"
msgstr ""
-#: keystone/exception.py:163
+#: keystone/exception.py:176
msgid "The request you have made requires authentication."
msgstr ""
-#: keystone/exception.py:169
+#: keystone/exception.py:182
msgid "Authentication plugin error."
msgstr ""
-#: keystone/exception.py:177
+#: keystone/exception.py:190
#, python-format
msgid "Unable to find valid groups while using mapping %(mapping_id)s"
msgstr ""
-#: keystone/exception.py:182
+#: keystone/exception.py:195
msgid "Attempted to authenticate with an unsupported method."
msgstr ""
-#: keystone/exception.py:190
+#: keystone/exception.py:203
msgid "Additional authentications steps required."
msgstr ""
-#: keystone/exception.py:198
+#: keystone/exception.py:211
msgid "You are not authorized to perform the requested action."
msgstr ""
-#: keystone/exception.py:205
+#: keystone/exception.py:218
#, python-format
msgid "You are not authorized to perform the requested action: %(action)s"
msgstr ""
-#: keystone/exception.py:210
+#: keystone/exception.py:223
#, python-format
msgid ""
"Could not change immutable attribute(s) '%(attributes)s' in target "
"%(target)s"
msgstr ""
-#: keystone/exception.py:215
+#: keystone/exception.py:228
#, python-format
msgid ""
"Group membership across backend boundaries is not allowed, group in "
"question is %(group_id)s, user is %(user_id)s"
msgstr ""
-#: keystone/exception.py:221
+#: keystone/exception.py:234
#, python-format
msgid ""
"Invalid mix of entities for policy association - only Endpoint, Service "
@@ -220,225 +153,229 @@ msgid ""
"Service: %(service_id)s, Region: %(region_id)s"
msgstr ""
-#: keystone/exception.py:228
+#: keystone/exception.py:241
#, python-format
msgid "Invalid domain specific configuration: %(reason)s"
msgstr ""
-#: keystone/exception.py:232
+#: keystone/exception.py:245
#, python-format
msgid "Could not find: %(target)s"
msgstr ""
-#: keystone/exception.py:238
+#: keystone/exception.py:251
#, python-format
msgid "Could not find endpoint: %(endpoint_id)s"
msgstr ""
-#: keystone/exception.py:245
+#: keystone/exception.py:258
msgid "An unhandled exception has occurred: Could not find metadata."
msgstr ""
-#: keystone/exception.py:250
+#: keystone/exception.py:263
#, python-format
msgid "Could not find policy: %(policy_id)s"
msgstr ""
-#: keystone/exception.py:254
+#: keystone/exception.py:267
msgid "Could not find policy association"
msgstr ""
-#: keystone/exception.py:258
+#: keystone/exception.py:271
#, python-format
msgid "Could not find role: %(role_id)s"
msgstr ""
-#: keystone/exception.py:262
+#: keystone/exception.py:275
#, python-format
msgid ""
"Could not find role assignment with role: %(role_id)s, user or group: "
"%(actor_id)s, project or domain: %(target_id)s"
msgstr ""
-#: keystone/exception.py:268
+#: keystone/exception.py:281
#, python-format
msgid "Could not find region: %(region_id)s"
msgstr ""
-#: keystone/exception.py:272
+#: keystone/exception.py:285
#, python-format
msgid "Could not find service: %(service_id)s"
msgstr ""
-#: keystone/exception.py:276
+#: keystone/exception.py:289
#, python-format
msgid "Could not find domain: %(domain_id)s"
msgstr ""
-#: keystone/exception.py:280
+#: keystone/exception.py:293
#, python-format
msgid "Could not find project: %(project_id)s"
msgstr ""
-#: keystone/exception.py:284
+#: keystone/exception.py:297
#, python-format
msgid "Cannot create project with parent: %(project_id)s"
msgstr ""
-#: keystone/exception.py:288
+#: keystone/exception.py:301
#, python-format
msgid "Could not find token: %(token_id)s"
msgstr ""
-#: keystone/exception.py:292
+#: keystone/exception.py:305
#, python-format
msgid "Could not find user: %(user_id)s"
msgstr ""
-#: keystone/exception.py:296
+#: keystone/exception.py:309
#, python-format
msgid "Could not find group: %(group_id)s"
msgstr ""
-#: keystone/exception.py:300
+#: keystone/exception.py:313
#, python-format
msgid "Could not find mapping: %(mapping_id)s"
msgstr ""
-#: keystone/exception.py:304
+#: keystone/exception.py:317
#, python-format
msgid "Could not find trust: %(trust_id)s"
msgstr ""
-#: keystone/exception.py:308
+#: keystone/exception.py:321
#, python-format
msgid "No remaining uses for trust: %(trust_id)s"
msgstr ""
-#: keystone/exception.py:312
+#: keystone/exception.py:325
#, python-format
msgid "Could not find credential: %(credential_id)s"
msgstr ""
-#: keystone/exception.py:316
+#: keystone/exception.py:329
#, python-format
msgid "Could not find version: %(version)s"
msgstr ""
-#: keystone/exception.py:320
+#: keystone/exception.py:333
#, python-format
msgid "Could not find Endpoint Group: %(endpoint_group_id)s"
msgstr ""
-#: keystone/exception.py:324
+#: keystone/exception.py:337
#, python-format
msgid "Could not find Identity Provider: %(idp_id)s"
msgstr ""
-#: keystone/exception.py:328
+#: keystone/exception.py:341
#, python-format
msgid "Could not find Service Provider: %(sp_id)s"
msgstr ""
-#: keystone/exception.py:332
+#: keystone/exception.py:345
#, python-format
msgid ""
"Could not find federated protocol %(protocol_id)s for Identity Provider: "
"%(idp_id)s"
msgstr ""
-#: keystone/exception.py:343
+#: keystone/exception.py:356
#, python-format
msgid ""
"Could not find %(group_or_option)s in domain configuration for domain "
"%(domain_id)s"
msgstr ""
-#: keystone/exception.py:348
+#: keystone/exception.py:361
#, python-format
msgid "Conflict occurred attempting to store %(type)s - %(details)s"
msgstr ""
-#: keystone/exception.py:356
+#: keystone/exception.py:369
msgid "An unexpected error prevented the server from fulfilling your request."
msgstr ""
-#: keystone/exception.py:359
+#: keystone/exception.py:372
#, python-format
msgid ""
"An unexpected error prevented the server from fulfilling your request: "
"%(exception)s"
msgstr ""
-#: keystone/exception.py:382
+#: keystone/exception.py:395
#, python-format
msgid "Unable to consume trust %(trust_id)s, unable to acquire lock."
msgstr ""
-#: keystone/exception.py:387
+#: keystone/exception.py:400
msgid ""
"Expected signing certificates are not available on the server. Please "
"check Keystone configuration."
msgstr ""
-#: keystone/exception.py:393
+#: keystone/exception.py:406
#, python-format
msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details."
msgstr ""
-#: keystone/exception.py:398
+#: keystone/exception.py:411
#, python-format
msgid ""
"Group %(group_id)s returned by mapping %(mapping_id)s was not found in "
"the backend."
msgstr ""
-#: keystone/exception.py:403
+#: keystone/exception.py:416
#, python-format
msgid "Error while reading metadata file, %(reason)s"
msgstr ""
-#: keystone/exception.py:407
+#: keystone/exception.py:420
#, python-format
msgid ""
"Unexpected combination of grant attributes - User: %(user_id)s, Group: "
"%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s"
msgstr ""
-#: keystone/exception.py:414
+#: keystone/exception.py:427
msgid "The action you have requested has not been implemented."
msgstr ""
-#: keystone/exception.py:421
+#: keystone/exception.py:434
msgid "The service you have requested is no longer available on this server."
msgstr ""
-#: keystone/exception.py:428
+#: keystone/exception.py:441
#, python-format
msgid "The Keystone configuration file %(config_file)s could not be found."
msgstr ""
-#: keystone/exception.py:433
+#: keystone/exception.py:446
msgid ""
"No encryption keys found; run keystone-manage fernet_setup to bootstrap "
"one."
msgstr ""
-#: keystone/exception.py:438
+#: keystone/exception.py:451
#, python-format
msgid ""
"The Keystone domain-specific configuration has specified more than one "
"SQL driver (only one is permitted): %(source)s."
msgstr ""
-#: keystone/exception.py:445
+#: keystone/exception.py:458
#, python-format
msgid ""
"%(mod_name)s doesn't provide database migrations. The migration "
"repository path at %(path)s doesn't exist or isn't a directory."
msgstr ""
-#: keystone/exception.py:457
+#: keystone/exception.py:465
+msgid "Token version is unrecognizable or unsupported."
+msgstr ""
+
+#: keystone/exception.py:470
#, python-format
msgid ""
"Unable to sign SAML assertion. It is likely that this server does not "
@@ -446,107 +383,112 @@ msgid ""
" %(reason)s"
msgstr ""
-#: keystone/exception.py:465
+#: keystone/exception.py:478
msgid ""
"No Authorization headers found, cannot proceed with OAuth related calls, "
"if running under HTTPd or Apache, ensure WSGIPassAuthorization is set to "
"On."
msgstr ""
-#: keystone/notifications.py:250
+#: keystone/notifications.py:273
#, python-format
msgid "%(event)s is not a valid notification event, must be one of: %(actions)s"
msgstr ""
-#: keystone/notifications.py:259
+#: keystone/notifications.py:282
#, python-format
msgid "Method not callable: %s"
msgstr ""
-#: keystone/assignment/controllers.py:107 keystone/identity/controllers.py:69
+#: keystone/assignment/controllers.py:99 keystone/identity/controllers.py:71
#: keystone/resource/controllers.py:78
msgid "Name field is required and cannot be empty"
msgstr ""
-#: keystone/assignment/controllers.py:330
-#: keystone/assignment/controllers.py:753
+#: keystone/assignment/controllers.py:155
+#: keystone/assignment/controllers.py:174
+msgid "User roles not supported: tenant_id required"
+msgstr ""
+
+#: keystone/assignment/controllers.py:338
+#: keystone/assignment/controllers.py:579
msgid "Specify a domain or project, not both"
msgstr ""
-#: keystone/assignment/controllers.py:333
+#: keystone/assignment/controllers.py:341
msgid "Specify one of domain or project"
msgstr ""
-#: keystone/assignment/controllers.py:338
-#: keystone/assignment/controllers.py:758
+#: keystone/assignment/controllers.py:346
+#: keystone/assignment/controllers.py:584
msgid "Specify a user or group, not both"
msgstr ""
-#: keystone/assignment/controllers.py:341
+#: keystone/assignment/controllers.py:349
msgid "Specify one of user or group"
msgstr ""
-#: keystone/assignment/controllers.py:742
+#: keystone/assignment/controllers.py:568
msgid "Combining effective and group filter will always result in an empty list."
msgstr ""
-#: keystone/assignment/controllers.py:747
+#: keystone/assignment/controllers.py:573
msgid ""
"Combining effective, domain and inherited filters will always result in "
"an empty list."
msgstr ""
-#: keystone/assignment/core.py:228
+#: keystone/assignment/core.py:233
msgid "Must specify either domain or project"
msgstr ""
-#: keystone/assignment/core.py:493
+#: keystone/assignment/core.py:903
#, python-format
msgid "Project (%s)"
msgstr ""
-#: keystone/assignment/core.py:495
+#: keystone/assignment/core.py:905
#, python-format
msgid "Domain (%s)"
msgstr ""
-#: keystone/assignment/core.py:497
+#: keystone/assignment/core.py:907
msgid "Unknown Target"
msgstr ""
-#: keystone/assignment/backends/ldap.py:92
+#: keystone/assignment/backends/ldap.py:91
msgid "Domain metadata not supported by LDAP"
msgstr ""
-#: keystone/assignment/backends/ldap.py:381
+#: keystone/assignment/backends/ldap.py:397
#, python-format
msgid "User %(user_id)s already has role %(role_id)s in tenant %(tenant_id)s"
msgstr ""
-#: keystone/assignment/backends/ldap.py:387
+#: keystone/assignment/backends/ldap.py:403
#, python-format
msgid "Role %s not found"
msgstr ""
-#: keystone/assignment/backends/ldap.py:402
-#: keystone/assignment/backends/sql.py:335
+#: keystone/assignment/backends/ldap.py:418
+#: keystone/assignment/backends/sql.py:334
#, python-format
msgid "Cannot remove role that has not been granted, %s"
msgstr ""
-#: keystone/assignment/backends/sql.py:356
+#: keystone/assignment/backends/sql.py:410
#, python-format
msgid "Unexpected assignment type encountered, %s"
msgstr ""
-#: keystone/assignment/role_backends/ldap.py:61 keystone/catalog/core.py:103
-#: keystone/common/ldap/core.py:1401 keystone/resource/backends/ldap.py:149
+#: keystone/assignment/role_backends/ldap.py:61 keystone/catalog/core.py:135
+#: keystone/common/ldap/core.py:1400 keystone/resource/backends/ldap.py:148
#, python-format
msgid "Duplicate ID, %s."
msgstr ""
#: keystone/assignment/role_backends/ldap.py:69
-#: keystone/common/ldap/core.py:1391
+#: keystone/common/ldap/core.py:1390
#, python-format
msgid "Duplicate name, %s."
msgstr ""
@@ -556,222 +498,249 @@ msgstr ""
msgid "Cannot duplicate name %s"
msgstr ""
-#: keystone/auth/controllers.py:60
-#, python-format
-msgid ""
-"Cannot load an auth-plugin by class-name without a \"method\" attribute "
-"defined: %s"
-msgstr ""
-
-#: keystone/auth/controllers.py:71
-#, python-format
-msgid ""
-"Auth plugin %(plugin)s is requesting previously registered method "
-"%(method)s"
-msgstr ""
-
-#: keystone/auth/controllers.py:115
+#: keystone/auth/controllers.py:118
#, python-format
msgid ""
"Unable to reconcile identity attribute %(attribute)s as it has "
"conflicting values %(new)s and %(old)s"
msgstr ""
-#: keystone/auth/controllers.py:336
+#: keystone/auth/controllers.py:333
msgid "Scoping to both domain and project is not allowed"
msgstr ""
-#: keystone/auth/controllers.py:339
+#: keystone/auth/controllers.py:336
msgid "Scoping to both domain and trust is not allowed"
msgstr ""
-#: keystone/auth/controllers.py:342
+#: keystone/auth/controllers.py:339
msgid "Scoping to both project and trust is not allowed"
msgstr ""
-#: keystone/auth/controllers.py:512
+#: keystone/auth/controllers.py:509
msgid "User not found"
msgstr ""
-#: keystone/auth/controllers.py:616
+#: keystone/auth/controllers.py:613
msgid "A project-scoped token is required to produce a service catalog."
msgstr ""
-#: keystone/auth/plugins/external.py:46
+#: keystone/auth/plugins/external.py:42
msgid "No authenticated user"
msgstr ""
-#: keystone/auth/plugins/external.py:56
+#: keystone/auth/plugins/external.py:52
#, python-format
msgid "Unable to lookup user %s"
msgstr ""
-#: keystone/auth/plugins/external.py:107
+#: keystone/auth/plugins/external.py:100
msgid "auth_type is not Negotiate"
msgstr ""
-#: keystone/auth/plugins/mapped.py:244
-msgid "Could not map user"
-msgstr ""
-
-#: keystone/auth/plugins/oauth1.py:39
-#, python-format
-msgid "%s not supported"
+#: keystone/auth/plugins/mapped.py:239
+msgid ""
+"Could not map user while setting ephemeral user identity. Either mapping "
+"rules must specify user id/name or REMOTE_USER environment variable must "
+"be set."
msgstr ""
-#: keystone/auth/plugins/oauth1.py:57
+#: keystone/auth/plugins/oauth1.py:51
msgid "Access token is expired"
msgstr ""
-#: keystone/auth/plugins/oauth1.py:71
+#: keystone/auth/plugins/oauth1.py:65
msgid "Could not validate the access token"
msgstr ""
-#: keystone/auth/plugins/password.py:46
+#: keystone/auth/plugins/password.py:45
msgid "Invalid username or password"
msgstr ""
-#: keystone/auth/plugins/token.py:72 keystone/token/controllers.py:160
+#: keystone/auth/plugins/token.py:70 keystone/token/controllers.py:162
msgid "rescope a scoped token"
msgstr ""
-#: keystone/catalog/controllers.py:168
+#: keystone/catalog/controllers.py:175
#, python-format
msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\""
msgstr ""
-#: keystone/common/authorization.py:47 keystone/common/wsgi.py:64
+#: keystone/cmd/cli.py:286
+msgid "At least one option must be provided"
+msgstr ""
+
+#: keystone/cmd/cli.py:293
+msgid "--all option cannot be mixed with other options"
+msgstr ""
+
+#: keystone/cmd/cli.py:300
#, python-format
-msgid "token reference must be a KeystoneToken type, got: %s"
+msgid "Unknown domain '%(name)s' specified by --domain-name"
msgstr ""
-#: keystone/common/base64utils.py:66
-msgid "pad must be single character"
+#: keystone/cmd/cli.py:355 keystone/tests/unit/test_cli.py:215
+msgid "At least one option must be provided, use either --all or --domain-name"
msgstr ""
-#: keystone/common/base64utils.py:215
+#: keystone/cmd/cli.py:361 keystone/tests/unit/test_cli.py:231
+msgid "The --all option cannot be used with the --domain-name option"
+msgstr ""
+
+#: keystone/cmd/cli.py:387 keystone/tests/unit/test_cli.py:248
#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
+msgid ""
+"Invalid domain name: %(domain)s found in config file name: %(file)s - "
+"ignoring this file."
msgstr ""
-#: keystone/common/base64utils.py:219
+#: keystone/cmd/cli.py:395 keystone/tests/unit/test_cli.py:189
#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
+msgid ""
+"Domain: %(domain)s already has a configuration defined - ignoring file: "
+"%(file)s."
msgstr ""
-#: keystone/common/base64utils.py:225
+#: keystone/cmd/cli.py:409
#, python-format
-msgid "text is not a multiple of 4, but contains pad \"%s\""
+msgid "Error parsing configuration file for domain: %(domain)s, file: %(file)s."
msgstr ""
-#: keystone/common/base64utils.py:244 keystone/common/base64utils.py:265
-msgid "padded base64url text must be multiple of 4 characters"
+#: keystone/cmd/cli.py:442
+#, python-format
+msgid ""
+"To get a more detailed information on this error, re-run this command for"
+" the specific domain, i.e.: keystone-manage domain_config_upload "
+"--domain-name %s"
msgstr ""
-#: keystone/common/controller.py:237 keystone/token/providers/common.py:589
-msgid "Non-default domain is not supported"
+#: keystone/cmd/cli.py:460
+#, python-format
+msgid "Unable to locate domain config directory: %s"
+msgstr ""
+
+#: keystone/cmd/cli.py:493
+msgid ""
+"Unable to access the keystone database, please check it is configured "
+"correctly."
msgstr ""
-#: keystone/common/controller.py:305 keystone/identity/core.py:428
-#: keystone/resource/core.py:761 keystone/resource/backends/ldap.py:61
+#: keystone/cmd/cli.py:559
#, python-format
-msgid "Expected dict or list: %s"
+msgid "Error while parsing rules %(path)s: %(err)s"
msgstr ""
-#: keystone/common/controller.py:318
-msgid "Marker could not be found"
+#: keystone/cmd/cli.py:568
+#, python-format
+msgid "Error while opening file %(path)s: %(err)s"
msgstr ""
-#: keystone/common/controller.py:329
-msgid "Invalid limit value"
+#: keystone/cmd/cli.py:578
+#, python-format
+msgid "Error while parsing line: '%(line)s': %(err)s"
msgstr ""
-#: keystone/common/controller.py:637
-msgid "Cannot change Domain ID"
+#: keystone/common/authorization.py:47 keystone/common/wsgi.py:66
+#, python-format
+msgid "token reference must be a KeystoneToken type, got: %s"
+msgstr ""
+
+#: keystone/common/base64utils.py:71
+msgid "pad must be single character"
msgstr ""
-#: keystone/common/controller.py:666
-msgid "domain_id is required as part of entity"
+#: keystone/common/base64utils.py:220
+#, python-format
+msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
msgstr ""
-#: keystone/common/controller.py:701
-msgid "A domain-scoped token must be used"
+#: keystone/common/base64utils.py:224
+#, python-format
+msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
msgstr ""
-#: keystone/common/dependency.py:68
+#: keystone/common/base64utils.py:230
#, python-format
-msgid "Unregistered dependency: %(name)s for %(targets)s"
+msgid "text is not a multiple of 4, but contains pad \"%s\""
msgstr ""
-#: keystone/common/dependency.py:108
-msgid "event_callbacks must be a dict"
+#: keystone/common/base64utils.py:249 keystone/common/base64utils.py:270
+msgid "padded base64url text must be multiple of 4 characters"
msgstr ""
-#: keystone/common/dependency.py:113
+#: keystone/common/clean.py:24
#, python-format
-msgid "event_callbacks[%s] must be a dict"
+msgid "%s cannot be empty."
msgstr ""
-#: keystone/common/pemutils.py:223
+#: keystone/common/clean.py:26
#, python-format
-msgid "unknown pem_type \"%(pem_type)s\", valid types are: %(valid_pem_types)s"
+msgid "%(property_name)s cannot be less than %(min_length)s characters."
msgstr ""
-#: keystone/common/pemutils.py:242
+#: keystone/common/clean.py:31
#, python-format
-msgid ""
-"unknown pem header \"%(pem_header)s\", valid headers are: "
-"%(valid_pem_headers)s"
+msgid "%(property_name)s should not be greater than %(max_length)s characters."
msgstr ""
-#: keystone/common/pemutils.py:298
+#: keystone/common/clean.py:40
#, python-format
-msgid "failed to find end matching \"%s\""
+msgid "%(property_name)s is not a %(display_expected_type)s"
+msgstr ""
+
+#: keystone/common/controller.py:229 keystone/common/controller.py:245
+#: keystone/token/providers/common.py:636
+msgid "Non-default domain is not supported"
msgstr ""
-#: keystone/common/pemutils.py:302
+#: keystone/common/controller.py:322 keystone/common/controller.py:350
+#: keystone/identity/core.py:506 keystone/resource/core.py:774
+#: keystone/resource/backends/ldap.py:61
#, python-format
-msgid ""
-"beginning & end PEM headers do not match (%(begin_pem_header)s!= "
-"%(end_pem_header)s)"
+msgid "Expected dict or list: %s"
+msgstr ""
+
+#: keystone/common/controller.py:363
+msgid "Marker could not be found"
+msgstr ""
+
+#: keystone/common/controller.py:374
+msgid "Invalid limit value"
msgstr ""
-#: keystone/common/pemutils.py:377
+#: keystone/common/controller.py:682
+msgid "Cannot change Domain ID"
+msgstr ""
+
+#: keystone/common/dependency.py:64
#, python-format
-msgid "unknown pem_type: \"%s\""
+msgid "Unregistered dependency: %(name)s for %(targets)s"
msgstr ""
-#: keystone/common/pemutils.py:389
+#: keystone/common/json_home.py:76
#, python-format
-msgid ""
-"failed to base64 decode %(pem_type)s PEM at position%(position)d: "
-"%(err_msg)s"
+msgid "Unexpected status requested for JSON Home response, %s"
msgstr ""
-#: keystone/common/utils.py:164 keystone/credential/controllers.py:44
+#: keystone/common/utils.py:166 keystone/credential/controllers.py:44
msgid "Invalid blob in credential"
msgstr ""
-#: keystone/common/wsgi.py:330
+#: keystone/common/wsgi.py:335
#, python-format
msgid "%s field is required and cannot be empty"
msgstr ""
-#: keystone/common/wsgi.py:342
+#: keystone/common/wsgi.py:347
#, python-format
msgid "%s field(s) cannot be empty"
msgstr ""
-#: keystone/common/wsgi.py:563
+#: keystone/common/wsgi.py:558
msgid "The resource could not be found."
msgstr ""
-#: keystone/common/wsgi.py:704
-#, python-format
-msgid "Unexpected status requested for JSON Home response, %s"
-msgstr ""
-
-#: keystone/common/cache/_memcache_pool.py:113
+#: keystone/common/cache/_memcache_pool.py:124
#, python-format
msgid "Unable to get a connection from pool id %(id)s after %(seconds)s seconds."
msgstr ""
@@ -815,31 +784,31 @@ msgid ""
" \"REQUIRED\""
msgstr ""
-#: keystone/common/kvs/core.py:71
+#: keystone/common/kvs/core.py:70
#, python-format
msgid "Lock Timeout occurred for key, %(target)s"
msgstr ""
-#: keystone/common/kvs/core.py:106
+#: keystone/common/kvs/core.py:105
#, python-format
msgid "KVS region %s is already configured. Cannot reconfigure."
msgstr ""
-#: keystone/common/kvs/core.py:145
+#: keystone/common/kvs/core.py:144
#, python-format
msgid "Key Value Store not configured: %s"
msgstr ""
-#: keystone/common/kvs/core.py:198
+#: keystone/common/kvs/core.py:197
msgid "`key_mangler` option must be a function reference"
msgstr ""
-#: keystone/common/kvs/core.py:353
+#: keystone/common/kvs/core.py:352
#, python-format
msgid "Lock key must match target key: %(lock)s != %(target)s"
msgstr ""
-#: keystone/common/kvs/core.py:357
+#: keystone/common/kvs/core.py:356
msgid "Must be called within an active lock context."
msgstr ""
@@ -848,28 +817,28 @@ msgstr ""
msgid "Maximum lock attempts on %s occurred."
msgstr ""
-#: keystone/common/kvs/backends/memcached.py:108
+#: keystone/common/kvs/backends/memcached.py:109
#, python-format
msgid ""
-"Backend `%(driver)s` is not a valid memcached backend. Valid drivers: "
-"%(driver_list)s"
+"Backend `%(backend)s` is not a valid memcached backend. Valid backends: "
+"%(backend_list)s"
msgstr ""
-#: keystone/common/kvs/backends/memcached.py:178
+#: keystone/common/kvs/backends/memcached.py:185
msgid "`key_mangler` functions must be callable."
msgstr ""
-#: keystone/common/ldap/core.py:191
+#: keystone/common/ldap/core.py:193
#, python-format
msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s"
msgstr ""
-#: keystone/common/ldap/core.py:201
+#: keystone/common/ldap/core.py:203
#, python-format
msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
msgstr ""
-#: keystone/common/ldap/core.py:213
+#: keystone/common/ldap/core.py:215
#, python-format
msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s"
msgstr ""
@@ -893,38 +862,38 @@ msgstr ""
msgid "tls_cacertdir %s not found or is not a directory"
msgstr ""
-#: keystone/common/ldap/core.py:1326
+#: keystone/common/ldap/core.py:1325
#, python-format
msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s"
msgstr ""
-#: keystone/common/ldap/core.py:1370
+#: keystone/common/ldap/core.py:1369
#, python-format
msgid "LDAP %s create"
msgstr ""
-#: keystone/common/ldap/core.py:1375
+#: keystone/common/ldap/core.py:1374
#, python-format
msgid "LDAP %s update"
msgstr ""
-#: keystone/common/ldap/core.py:1380
+#: keystone/common/ldap/core.py:1379
#, python-format
msgid "LDAP %s delete"
msgstr ""
-#: keystone/common/ldap/core.py:1522
+#: keystone/common/ldap/core.py:1521
msgid ""
"Disabling an entity where the 'enable' attribute is ignored by "
"configuration."
msgstr ""
-#: keystone/common/ldap/core.py:1533
+#: keystone/common/ldap/core.py:1532
#, python-format
msgid "Cannot change %(option_name)s %(attr)s"
msgstr ""
-#: keystone/common/ldap/core.py:1620
+#: keystone/common/ldap/core.py:1619
#, python-format
msgid "Member %(member)s is already a member of group %(group)s"
msgstr ""
@@ -935,31 +904,38 @@ msgid ""
" self "
msgstr ""
-#: keystone/common/sql/core.py:410
+#: keystone/common/sql/core.py:445
msgid "Duplicate Entry"
msgstr ""
-#: keystone/common/sql/core.py:426
+#: keystone/common/sql/core.py:461
#, python-format
msgid "An unexpected error occurred when trying to store %s"
msgstr ""
-#: keystone/common/sql/migration_helpers.py:187
-#: keystone/common/sql/migration_helpers.py:245
+#: keystone/common/sql/migration_helpers.py:171
+#: keystone/common/sql/migration_helpers.py:213
#, python-format
msgid "%s extension does not exist."
msgstr ""
+#: keystone/common/validation/__init__.py:41
+#, python-format
+msgid ""
+"validated expected to find %(param_name)r in function signature for "
+"%(func_name)r."
+msgstr ""
+
#: keystone/common/validation/validators.py:54
#, python-format
msgid "Invalid input for field '%(path)s'. The value is '%(value)s'."
msgstr ""
-#: keystone/contrib/ec2/controllers.py:318
+#: keystone/contrib/ec2/controllers.py:324
msgid "Token belongs to another user"
msgstr ""
-#: keystone/contrib/ec2/controllers.py:346
+#: keystone/contrib/ec2/controllers.py:352
msgid "Credential belongs to another user"
msgstr ""
@@ -972,42 +948,37 @@ msgstr ""
msgid "Endpoint Group Project Association not found"
msgstr ""
-#: keystone/contrib/endpoint_policy/core.py:258
-#, python-format
-msgid "No policy is associated with endpoint %(endpoint_id)s."
-msgstr ""
-
-#: keystone/contrib/federation/controllers.py:274
-msgid "Missing entity ID from environment"
-msgstr ""
-
-#: keystone/contrib/federation/controllers.py:282
+#: keystone/contrib/federation/controllers.py:268
msgid "Request must have an origin query parameter"
msgstr ""
-#: keystone/contrib/federation/controllers.py:292
+#: keystone/contrib/federation/controllers.py:273
#, python-format
msgid "%(host)s is not a trusted dashboard host"
msgstr ""
-#: keystone/contrib/federation/controllers.py:333
+#: keystone/contrib/federation/controllers.py:304
+msgid "Missing entity ID from environment"
+msgstr ""
+
+#: keystone/contrib/federation/controllers.py:344
msgid "Use a project scoped token when attempting to create a SAML assertion"
msgstr ""
-#: keystone/contrib/federation/idp.py:454
+#: keystone/contrib/federation/idp.py:476
#, python-format
msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s"
msgstr ""
-#: keystone/contrib/federation/idp.py:521
+#: keystone/contrib/federation/idp.py:543
msgid "Ensure configuration option idp_entity_id is set."
msgstr ""
-#: keystone/contrib/federation/idp.py:524
+#: keystone/contrib/federation/idp.py:546
msgid "Ensure configuration option idp_sso_endpoint is set."
msgstr ""
-#: keystone/contrib/federation/idp.py:544
+#: keystone/contrib/federation/idp.py:566
msgid ""
"idp_contact_type must be one of: [technical, other, support, "
"administrative or billing."
@@ -1017,95 +988,93 @@ msgstr ""
msgid "Federation token is expired"
msgstr ""
-#: keystone/contrib/federation/utils.py:208
-msgid ""
-"Could not find Identity Provider identifier in environment, check "
-"[federation] remote_id_attribute for details."
+#: keystone/contrib/federation/utils.py:231
+msgid "Could not find Identity Provider identifier in environment"
msgstr ""
-#: keystone/contrib/federation/utils.py:213
+#: keystone/contrib/federation/utils.py:235
msgid ""
"Incoming identity provider identifier not included among the accepted "
"identifiers."
msgstr ""
-#: keystone/contrib/federation/utils.py:501
+#: keystone/contrib/federation/utils.py:523
#, python-format
msgid "User type %s not supported"
msgstr ""
-#: keystone/contrib/federation/utils.py:537
+#: keystone/contrib/federation/utils.py:559
#, python-format
msgid ""
"Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords "
"must be specified."
msgstr ""
-#: keystone/contrib/federation/utils.py:753
+#: keystone/contrib/federation/utils.py:775
#, python-format
msgid "Identity Provider %(idp)s is disabled"
msgstr ""
-#: keystone/contrib/federation/utils.py:761
+#: keystone/contrib/federation/utils.py:783
#, python-format
msgid "Service Provider %(sp)s is disabled"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:99
+#: keystone/contrib/oauth1/controllers.py:96
msgid "Cannot change consumer secret"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:131
+#: keystone/contrib/oauth1/controllers.py:128
msgid "Cannot list request tokens with a token issued via delegation."
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:192
+#: keystone/contrib/oauth1/controllers.py:189
#: keystone/contrib/oauth1/backends/sql.py:270
msgid "User IDs do not match"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:199
+#: keystone/contrib/oauth1/controllers.py:196
msgid "Could not find role"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:248
+#: keystone/contrib/oauth1/controllers.py:245
msgid "Invalid signature"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:299
-#: keystone/contrib/oauth1/controllers.py:377
+#: keystone/contrib/oauth1/controllers.py:296
+#: keystone/contrib/oauth1/controllers.py:374
msgid "Request token is expired"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:313
+#: keystone/contrib/oauth1/controllers.py:310
msgid "There should not be any non-oauth parameters"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:317
+#: keystone/contrib/oauth1/controllers.py:314
msgid "provided consumer key does not match stored consumer key"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:321
+#: keystone/contrib/oauth1/controllers.py:318
msgid "provided verifier does not match stored verifier"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:325
+#: keystone/contrib/oauth1/controllers.py:322
msgid "provided request key does not match stored request key"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:329
+#: keystone/contrib/oauth1/controllers.py:326
msgid "Request Token does not have an authorizing user id"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:366
+#: keystone/contrib/oauth1/controllers.py:363
msgid "Cannot authorize a request token with a token issued via delegation."
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:396
+#: keystone/contrib/oauth1/controllers.py:390
msgid "authorizing user does not have role required"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:409
+#: keystone/contrib/oauth1/controllers.py:403
msgid "User is not a member of the requested project"
msgstr ""
@@ -1126,23 +1095,28 @@ msgstr ""
msgid "invalid date format %s"
msgstr ""
-#: keystone/contrib/revoke/core.py:150
+#: keystone/contrib/revoke/core.py:159
msgid ""
"The revoke call must not have both domain_id and project_id. This is a "
"bug in the Keystone server. The current request is aborted."
msgstr ""
-#: keystone/contrib/revoke/core.py:218 keystone/token/provider.py:207
-#: keystone/token/provider.py:230 keystone/token/provider.py:296
-#: keystone/token/provider.py:303
+#: keystone/contrib/revoke/core.py:227 keystone/token/provider.py:197
+#: keystone/token/provider.py:220 keystone/token/provider.py:286
+#: keystone/token/provider.py:293
msgid "Failed to validate token"
msgstr ""
-#: keystone/identity/controllers.py:72
+#: keystone/endpoint_policy/core.py:261
+#, python-format
+msgid "No policy is associated with endpoint %(endpoint_id)s."
+msgstr ""
+
+#: keystone/identity/controllers.py:74
msgid "Enabled field must be a boolean"
msgstr ""
-#: keystone/identity/controllers.py:98
+#: keystone/identity/controllers.py:100
msgid "Enabled field should be a boolean"
msgstr ""
@@ -1151,33 +1125,40 @@ msgstr ""
msgid "Database at /domains/%s/config"
msgstr ""
-#: keystone/identity/core.py:287 keystone/identity/backends/ldap.py:59
-#: keystone/identity/backends/ldap.py:61 keystone/identity/backends/ldap.py:67
-#: keystone/identity/backends/ldap.py:69 keystone/identity/backends/sql.py:104
+#: keystone/identity/core.py:189
+#, python-format
+msgid ""
+"Domain specific sql drivers are not supported via the Identity API. One "
+"is specified in /domains/%s/config"
+msgstr ""
+
+#: keystone/identity/core.py:361 keystone/identity/backends/ldap.py:58
+#: keystone/identity/backends/ldap.py:60 keystone/identity/backends/ldap.py:66
+#: keystone/identity/backends/ldap.py:68 keystone/identity/backends/sql.py:104
#: keystone/identity/backends/sql.py:106
msgid "Invalid user / password"
msgstr ""
-#: keystone/identity/core.py:693
+#: keystone/identity/core.py:771
#, python-format
msgid "User is disabled: %s"
msgstr ""
-#: keystone/identity/core.py:735
+#: keystone/identity/core.py:813
msgid "Cannot change user ID"
msgstr ""
-#: keystone/identity/backends/ldap.py:99
+#: keystone/identity/backends/ldap.py:98
msgid "Cannot change user name"
msgstr ""
-#: keystone/identity/backends/ldap.py:188 keystone/identity/backends/sql.py:188
+#: keystone/identity/backends/ldap.py:187 keystone/identity/backends/sql.py:188
#: keystone/identity/backends/sql.py:206
#, python-format
msgid "User '%(user_id)s' not found in group '%(group_id)s'"
msgstr ""
-#: keystone/identity/backends/ldap.py:339
+#: keystone/identity/backends/ldap.py:338
#, python-format
msgid "User %(user_id)s is already a member of group %(group_id)s"
msgstr ""
@@ -1186,198 +1167,168 @@ msgstr ""
msgid "Found invalid token: scoped to both project and domain."
msgstr ""
-#: keystone/openstack/common/versionutils.py:108
-#, python-format
-msgid ""
-"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and "
-"may be removed in %(remove_in)s."
-msgstr ""
-
-#: keystone/openstack/common/versionutils.py:112
-#, python-format
-msgid ""
-"%(what)s is deprecated as of %(as_of)s and may be removed in "
-"%(remove_in)s. It will not be superseded."
-msgstr ""
-
-#: keystone/openstack/common/versionutils.py:116
-#, python-format
-msgid "%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s."
-msgstr ""
-
-#: keystone/openstack/common/versionutils.py:119
-#, python-format
-msgid "%(what)s is deprecated as of %(as_of)s. It will not be superseded."
-msgstr ""
-
-#: keystone/openstack/common/versionutils.py:241
-#, python-format
-msgid "Deprecated: %s"
-msgstr ""
-
-#: keystone/openstack/common/versionutils.py:259
-#, python-format
-msgid "Fatal call to deprecated config: %(msg)s"
-msgstr ""
-
-#: keystone/resource/controllers.py:231
+#: keystone/resource/controllers.py:234
msgid ""
"Cannot use parents_as_list and parents_as_ids query params at the same "
"time."
msgstr ""
-#: keystone/resource/controllers.py:237
+#: keystone/resource/controllers.py:240
msgid ""
"Cannot use subtree_as_list and subtree_as_ids query params at the same "
"time."
msgstr ""
-#: keystone/resource/core.py:80
+#: keystone/resource/core.py:82
#, python-format
msgid "max hierarchy depth reached for %s branch."
msgstr ""
-#: keystone/resource/core.py:97
+#: keystone/resource/core.py:100
msgid "cannot create a project within a different domain than its parents."
msgstr ""
-#: keystone/resource/core.py:101
+#: keystone/resource/core.py:104
#, python-format
msgid "cannot create a project in a branch containing a disabled project: %s"
msgstr ""
-#: keystone/resource/core.py:123
+#: keystone/resource/core.py:126
#, python-format
msgid "Domain is disabled: %s"
msgstr ""
-#: keystone/resource/core.py:141
+#: keystone/resource/core.py:145
#, python-format
msgid "Domain cannot be named %s"
msgstr ""
-#: keystone/resource/core.py:144
+#: keystone/resource/core.py:148
#, python-format
msgid "Domain cannot have ID %s"
msgstr ""
-#: keystone/resource/core.py:156
+#: keystone/resource/core.py:160
#, python-format
msgid "Project is disabled: %s"
msgstr ""
-#: keystone/resource/core.py:176
+#: keystone/resource/core.py:180
#, python-format
msgid "cannot enable project %s since it has disabled parents"
msgstr ""
-#: keystone/resource/core.py:184
+#: keystone/resource/core.py:188
#, python-format
msgid "cannot disable project %s since its subtree contains enabled projects"
msgstr ""
-#: keystone/resource/core.py:195
+#: keystone/resource/core.py:199
msgid "Update of `parent_id` is not allowed."
msgstr ""
-#: keystone/resource/core.py:222
+#: keystone/resource/core.py:226
#, python-format
msgid "cannot delete the project %s since it is not a leaf in the hierarchy."
msgstr ""
-#: keystone/resource/core.py:376
+#: keystone/resource/core.py:253
+msgid "Project field is required and cannot be empty."
+msgstr ""
+
+#: keystone/resource/core.py:389
msgid "Multiple domains are not supported"
msgstr ""
-#: keystone/resource/core.py:429
+#: keystone/resource/core.py:442
msgid "delete the default domain"
msgstr ""
-#: keystone/resource/core.py:440
+#: keystone/resource/core.py:453
msgid "cannot delete a domain that is enabled, please disable it first."
msgstr ""
-#: keystone/resource/core.py:841
+#: keystone/resource/core.py:859
msgid "No options specified"
msgstr ""
-#: keystone/resource/core.py:847
+#: keystone/resource/core.py:865
#, python-format
msgid ""
"The value of group %(group)s specified in the config should be a "
"dictionary of options"
msgstr ""
-#: keystone/resource/core.py:871
+#: keystone/resource/core.py:889
#, python-format
msgid ""
"Option %(option)s found with no group specified while checking domain "
"configuration request"
msgstr ""
-#: keystone/resource/core.py:878
+#: keystone/resource/core.py:896
#, python-format
msgid "Group %(group)s is not supported for domain specific configurations"
msgstr ""
-#: keystone/resource/core.py:885
+#: keystone/resource/core.py:903
#, python-format
msgid ""
"Option %(option)s in group %(group)s is not supported for domain specific"
" configurations"
msgstr ""
-#: keystone/resource/core.py:938
+#: keystone/resource/core.py:956
msgid "An unexpected error occurred when retrieving domain configs"
msgstr ""
-#: keystone/resource/core.py:1013 keystone/resource/core.py:1097
-#: keystone/resource/core.py:1167 keystone/resource/config_backends/sql.py:70
+#: keystone/resource/core.py:1035 keystone/resource/core.py:1119
+#: keystone/resource/core.py:1190 keystone/resource/config_backends/sql.py:70
#, python-format
msgid "option %(option)s in group %(group)s"
msgstr ""
-#: keystone/resource/core.py:1016 keystone/resource/core.py:1102
-#: keystone/resource/core.py:1163
+#: keystone/resource/core.py:1038 keystone/resource/core.py:1124
+#: keystone/resource/core.py:1186
#, python-format
msgid "group %(group)s"
msgstr ""
-#: keystone/resource/core.py:1018
+#: keystone/resource/core.py:1040
msgid "any options"
msgstr ""
-#: keystone/resource/core.py:1062
+#: keystone/resource/core.py:1084
#, python-format
msgid ""
"Trying to update option %(option)s in group %(group)s, so that, and only "
"that, option must be specified in the config"
msgstr ""
-#: keystone/resource/core.py:1067
+#: keystone/resource/core.py:1089
#, python-format
msgid ""
"Trying to update group %(group)s, so that, and only that, group must be "
"specified in the config"
msgstr ""
-#: keystone/resource/core.py:1076
+#: keystone/resource/core.py:1098
#, python-format
msgid ""
"request to update group %(group)s, but config provided contains group "
"%(group_other)s instead"
msgstr ""
-#: keystone/resource/core.py:1083
+#: keystone/resource/core.py:1105
#, python-format
msgid ""
"Trying to update option %(option)s in group %(group)s, but config "
"provided contains option %(option_other)s instead"
msgstr ""
-#: keystone/resource/backends/ldap.py:151
-#: keystone/resource/backends/ldap.py:159
-#: keystone/resource/backends/ldap.py:163
+#: keystone/resource/backends/ldap.py:150
+#: keystone/resource/backends/ldap.py:158
+#: keystone/resource/backends/ldap.py:162
msgid "Domains are read-only against LDAP"
msgstr ""
@@ -1395,54 +1346,79 @@ msgstr ""
#: keystone/token/controllers.py:391
#, python-format
+msgid "Project ID not found: %(t_id)s"
+msgstr ""
+
+#: keystone/token/controllers.py:395
+#, python-format
msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
msgstr ""
-#: keystone/token/controllers.py:410 keystone/token/controllers.py:413
+#: keystone/token/controllers.py:414 keystone/token/controllers.py:417
msgid "Token does not belong to specified tenant."
msgstr ""
-#: keystone/token/persistence/backends/kvs.py:133
+#: keystone/token/persistence/backends/kvs.py:132
#, python-format
msgid "Unknown token version %s"
msgstr ""
-#: keystone/token/providers/common.py:250
-#: keystone/token/providers/common.py:355
+#: keystone/token/providers/common.py:54
+msgid "Domains are not supported by the v2 API. Please use the v3 API instead."
+msgstr ""
+
+#: keystone/token/providers/common.py:64
+#, python-format
+msgid ""
+"Project not found in the default domain (please use the v3 API instead): "
+"%s"
+msgstr ""
+
+#: keystone/token/providers/common.py:83
+#, python-format
+msgid "User not found in the default domain (please use the v3 API instead): %s"
+msgstr ""
+
+#: keystone/token/providers/common.py:292
+#: keystone/token/providers/common.py:397
#, python-format
msgid "User %(user_id)s has no access to project %(project_id)s"
msgstr ""
-#: keystone/token/providers/common.py:255
-#: keystone/token/providers/common.py:360
+#: keystone/token/providers/common.py:297
+#: keystone/token/providers/common.py:402
#, python-format
msgid "User %(user_id)s has no access to domain %(domain_id)s"
msgstr ""
-#: keystone/token/providers/common.py:282
+#: keystone/token/providers/common.py:324
msgid "Trustor is disabled."
msgstr ""
-#: keystone/token/providers/common.py:346
+#: keystone/token/providers/common.py:388
msgid "Trustee has no delegated roles."
msgstr ""
-#: keystone/token/providers/common.py:407
+#: keystone/token/providers/common.py:449
#, python-format
msgid "Invalid audit info data type: %(data)s (%(type)s)"
msgstr ""
-#: keystone/token/providers/common.py:435
+#: keystone/token/providers/common.py:477
msgid "User is not a trustee."
msgstr ""
-#: keystone/token/providers/common.py:579
+#: keystone/token/providers/common.py:546
+msgid "The configured token provider does not support bind authentication."
+msgstr ""
+
+#: keystone/token/providers/common.py:626
msgid ""
"Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 "
"Authentication"
msgstr ""
-#: keystone/token/providers/common.py:597
+#: keystone/token/providers/common.py:644
msgid "Domain scoped token is not supported"
msgstr ""
@@ -1450,71 +1426,75 @@ msgstr ""
msgid "Unable to sign token."
msgstr ""
-#: keystone/token/providers/fernet/core.py:210
+#: keystone/token/providers/fernet/core.py:182
msgid ""
"This is not a v2.0 Fernet token. Use v3 for trust, domain, or federated "
"tokens."
msgstr ""
-#: keystone/token/providers/fernet/token_formatters.py:189
+#: keystone/token/providers/fernet/token_formatters.py:80
+msgid "This is not a recognized Fernet token"
+msgstr ""
+
+#: keystone/token/providers/fernet/token_formatters.py:202
#, python-format
msgid "This is not a recognized Fernet payload version: %s"
msgstr ""
-#: keystone/trust/controllers.py:148
+#: keystone/trust/controllers.py:144
msgid "Redelegation allowed for delegated by trust only"
msgstr ""
-#: keystone/trust/controllers.py:181
+#: keystone/trust/controllers.py:177
msgid "The authenticated user should match the trustor."
msgstr ""
-#: keystone/trust/controllers.py:186
+#: keystone/trust/controllers.py:182
msgid "At least one role should be specified."
msgstr ""
-#: keystone/trust/core.py:57
+#: keystone/trust/core.py:61
#, python-format
msgid ""
"Remaining redelegation depth of %(redelegation_depth)d out of allowed "
"range of [0..%(max_count)d]"
msgstr ""
-#: keystone/trust/core.py:66
+#: keystone/trust/core.py:70
#, python-format
msgid ""
"Field \"remaining_uses\" is set to %(value)s while it must not be set in "
"order to redelegate a trust"
msgstr ""
-#: keystone/trust/core.py:77
+#: keystone/trust/core.py:81
msgid "Requested expiration time is more than redelegated trust can provide"
msgstr ""
-#: keystone/trust/core.py:87
+#: keystone/trust/core.py:91
msgid "Some of requested roles are not in redelegated trust"
msgstr ""
-#: keystone/trust/core.py:116
+#: keystone/trust/core.py:120
msgid "One of the trust agents is disabled or deleted"
msgstr ""
-#: keystone/trust/core.py:135
+#: keystone/trust/core.py:139
msgid "remaining_uses must be a positive integer or null."
msgstr ""
-#: keystone/trust/core.py:141
+#: keystone/trust/core.py:145
#, python-format
msgid ""
"Requested redelegation depth of %(requested_count)d is greater than "
"allowed %(max_count)d"
msgstr ""
-#: keystone/trust/core.py:147
+#: keystone/trust/core.py:152
msgid "remaining_uses must not be set if redelegation is allowed"
msgstr ""
-#: keystone/trust/core.py:157
+#: keystone/trust/core.py:162
msgid ""
"Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting"
" this parameter is advised."
diff --git a/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-critical.po
index b7f255c4..6a6f1868 100644
--- a/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-critical.po
@@ -1,5 +1,5 @@
# Translations template for keystone.
-# Copyright (C) 2014 OpenStack Foundation
+# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
# Translators:
@@ -7,19 +7,18 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
"PO-Revision-Date: 2014-08-31 15:19+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/keystone/"
+"Language-Team: Korean (Korea) (http://www.transifex.com/openstack/keystone/"
"language/ko_KR/)\n"
"Language: ko_KR\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
"Plural-Forms: nplurals=1; plural=0;\n"
-#: keystone/catalog/backends/templated.py:106
#, python-format
msgid "Unable to open template file %s"
msgstr "템플리트 파일 %s을(를) 열 수 없음"
diff --git a/keystone-moon/keystone/locale/pl_PL/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/pl_PL/LC_MESSAGES/keystone-log-critical.po
index b7749060..43b0dc54 100644
--- a/keystone-moon/keystone/locale/pl_PL/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/pl_PL/LC_MESSAGES/keystone-log-critical.po
@@ -1,5 +1,5 @@
# Translations template for keystone.
-# Copyright (C) 2014 OpenStack Foundation
+# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
# Translators:
@@ -7,20 +7,19 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
"PO-Revision-Date: 2014-08-31 15:19+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Polish (Poland) (http://www.transifex.com/projects/p/keystone/"
+"Language-Team: Polish (Poland) (http://www.transifex.com/openstack/keystone/"
"language/pl_PL/)\n"
"Language: pl_PL\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
"Plural-Forms: nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 "
"|| n%100>=20) ? 1 : 2);\n"
-#: keystone/catalog/backends/templated.py:106
#, python-format
msgid "Unable to open template file %s"
msgstr "Błąd podczas otwierania pliku %s"
diff --git a/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-critical.po
index 689a23ec..48e0c8c7 100644
--- a/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-critical.po
@@ -1,5 +1,5 @@
# Translations template for keystone.
-# Copyright (C) 2014 OpenStack Foundation
+# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
# Translators:
@@ -7,19 +7,18 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
"PO-Revision-Date: 2014-08-31 15:19+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/"
+"Language-Team: Portuguese (Brazil) (http://www.transifex.com/openstack/"
"keystone/language/pt_BR/)\n"
"Language: pt_BR\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
-#: keystone/catalog/backends/templated.py:106
#, python-format
msgid "Unable to open template file %s"
msgstr "Não é possível abrir o arquivo de modelo %s"
diff --git a/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-error.po b/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-error.po
index 5f81b98d..12e4591f 100644
--- a/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-error.po
+++ b/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-error.po
@@ -7,66 +7,18 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-03-09 06:03+0000\n"
-"PO-Revision-Date: 2015-03-07 04:31+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
+"PO-Revision-Date: 2015-06-26 17:13+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/"
+"Language-Team: Portuguese (Brazil) (http://www.transifex.com/openstack/"
"keystone/language/pt_BR/)\n"
"Language: pt_BR\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
-#: keystone/notifications.py:304
-msgid "Failed to construct notifier"
-msgstr ""
-
-#: keystone/notifications.py:389
-#, python-format
-msgid "Failed to send %(res_id)s %(event_type)s notification"
-msgstr "Falha ao enviar notificação %(res_id)s %(event_type)s"
-
-#: keystone/notifications.py:606
-#, python-format
-msgid "Failed to send %(action)s %(event_type)s notification"
-msgstr ""
-
-#: keystone/catalog/core.py:62
-#, python-format
-msgid "Malformed endpoint - %(url)r is not a string"
-msgstr ""
-
-#: keystone/catalog/core.py:66
-#, python-format
-msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s"
-msgstr "Endpoint mal formado %(url)s - chave desconhecida %(keyerror)s"
-
-#: keystone/catalog/core.py:71
-#, python-format
-msgid ""
-"Malformed endpoint '%(url)s'. The following type error occurred during "
-"string substitution: %(typeerror)s"
-msgstr ""
-
-#: keystone/catalog/core.py:77
-#, python-format
-msgid ""
-"Malformed endpoint %s - incomplete format (are you missing a type notifier ?)"
-msgstr ""
-
-#: keystone/common/openssl.py:93
-#, python-format
-msgid "Command %(to_exec)s exited with %(retcode)s- %(output)s"
-msgstr ""
-
-#: keystone/common/openssl.py:121
-#, python-format
-msgid "Failed to remove file %(file_path)r: %(error)s"
-msgstr ""
-
-#: keystone/common/utils.py:239
msgid ""
"Error setting up the debug environment. Verify that the option --debug-url "
"has the format <host>:<port> and that a debugger processes is listening on "
@@ -76,104 +28,29 @@ msgstr ""
"possui o formato <host>:<port> e que o processo debugger está escutando "
"nesta porta."
-#: keystone/common/cache/core.py:100
#, python-format
-msgid ""
-"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
-"Skipping unknown format: %s"
-msgstr ""
-"Não é possível construir chave de configuração do cache. Formato esperado "
-"\"<argname>:<value>\". Pulando formato desconhecido: %s"
+msgid "Failed to send %(res_id)s %(event_type)s notification"
+msgstr "Falha ao enviar notificação %(res_id)s %(event_type)s"
+
+msgid "Failed to validate token"
+msgstr "Falha ao validar token"
-#: keystone/common/environment/eventlet_server.py:99
#, python-format
-msgid "Could not bind to %(host)s:%(port)s"
-msgstr ""
+msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s"
+msgstr "Endpoint mal formado %(url)s - chave desconhecida %(keyerror)s"
-#: keystone/common/environment/eventlet_server.py:185
msgid "Server error"
msgstr "Erro do servidor"
-#: keystone/contrib/endpoint_policy/core.py:129
-#: keystone/contrib/endpoint_policy/core.py:228
-#, python-format
-msgid ""
-"Circular reference or a repeated entry found in region tree - %(region_id)s."
-msgstr ""
-
-#: keystone/contrib/federation/idp.py:410
-#, python-format
-msgid "Error when signing assertion, reason: %(reason)s"
-msgstr ""
-
-#: keystone/contrib/oauth1/core.py:136
-msgid "Cannot retrieve Authorization headers"
-msgstr ""
-
-#: keystone/openstack/common/loopingcall.py:95
-msgid "in fixed duration looping call"
-msgstr "em uma chamada de laço de duração fixa"
-
-#: keystone/openstack/common/loopingcall.py:138
-msgid "in dynamic looping call"
-msgstr "em chamada de laço dinâmico"
-
-#: keystone/openstack/common/service.py:268
-msgid "Unhandled exception"
-msgstr "Exceção não tratada"
-
-#: keystone/resource/core.py:477
-#, python-format
-msgid ""
-"Circular reference or a repeated entry found projects hierarchy - "
-"%(project_id)s."
-msgstr ""
-
-#: keystone/resource/core.py:939
-#, python-format
-msgid ""
-"Unexpected results in response for domain config - %(count)s responses, "
-"first option is %(option)s, expected option %(expected)s"
-msgstr ""
-
-#: keystone/resource/backends/sql.py:102 keystone/resource/backends/sql.py:121
#, python-format
msgid ""
-"Circular reference or a repeated entry found in projects hierarchy - "
-"%(project_id)s."
+"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
+"Skipping unknown format: %s"
msgstr ""
+"Não é possível construir chave de configuração do cache. Formato esperado "
+"\"<argname>:<value>\". Pulando formato desconhecido: %s"
-#: keystone/token/provider.py:292
#, python-format
msgid "Unexpected error or malformed token determining token expiry: %s"
msgstr ""
"Erro inesperado ou token mal formado ao determinar validade do token: %s"
-
-#: keystone/token/persistence/backends/kvs.py:226
-#, python-format
-msgid ""
-"Reinitializing revocation list due to error in loading revocation list from "
-"backend. Expected `list` type got `%(type)s`. Old revocation list data: "
-"%(list)r"
-msgstr ""
-
-#: keystone/token/providers/common.py:611
-msgid "Failed to validate token"
-msgstr "Falha ao validar token"
-
-#: keystone/token/providers/pki.py:47
-msgid "Unable to sign token"
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:38
-#, python-format
-msgid ""
-"Either [fernet_tokens] key_repository does not exist or Keystone does not "
-"have sufficient permission to access it: %s"
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:79
-msgid ""
-"Failed to create [fernet_tokens] key_repository: either it already exists or "
-"you don't have sufficient permissions to create it"
-msgstr ""
diff --git a/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone.po b/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone.po
index fdb771c9..02ff0550 100644
--- a/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone.po
+++ b/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone.po
@@ -10,1537 +10,325 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-03-23 06:04+0000\n"
-"PO-Revision-Date: 2015-03-21 23:03+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
+"PO-Revision-Date: 2015-08-04 18:01+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Portuguese (Brazil) "
-"(http://www.transifex.com/projects/p/keystone/language/pt_BR/)\n"
+"Language-Team: Portuguese (Brazil) (http://www.transifex.com/openstack/"
+"keystone/language/pt_BR/)\n"
"Plural-Forms: nplurals=2; plural=(n > 1)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
-#: keystone/clean.py:24
-#, python-format
-msgid "%s cannot be empty."
-msgstr "%s não pode estar vazio."
-
-#: keystone/clean.py:26
#, python-format
msgid "%(property_name)s cannot be less than %(min_length)s characters."
msgstr "%(property_name)s não pode ter menos de %(min_length)s caracteres."
-#: keystone/clean.py:31
-#, python-format
-msgid "%(property_name)s should not be greater than %(max_length)s characters."
-msgstr "%(property_name)s não deve ter mais de %(max_length)s caracteres."
-
-#: keystone/clean.py:40
#, python-format
msgid "%(property_name)s is not a %(display_expected_type)s"
msgstr "%(property_name)s não é um %(display_expected_type)s"
-#: keystone/cli.py:283
-msgid "At least one option must be provided"
-msgstr ""
-
-#: keystone/cli.py:290
-msgid "--all option cannot be mixed with other options"
-msgstr ""
-
-#: keystone/cli.py:301
-#, python-format
-msgid "Unknown domain '%(name)s' specified by --domain-name"
-msgstr ""
-
-#: keystone/cli.py:365 keystone/tests/unit/test_cli.py:213
-msgid "At least one option must be provided, use either --all or --domain-name"
-msgstr ""
-
-#: keystone/cli.py:371 keystone/tests/unit/test_cli.py:229
-msgid "The --all option cannot be used with the --domain-name option"
-msgstr ""
-
-#: keystone/cli.py:397 keystone/tests/unit/test_cli.py:246
-#, python-format
-msgid ""
-"Invalid domain name: %(domain)s found in config file name: %(file)s - "
-"ignoring this file."
-msgstr ""
-
-#: keystone/cli.py:405 keystone/tests/unit/test_cli.py:187
-#, python-format
-msgid ""
-"Domain: %(domain)s already has a configuration defined - ignoring file: "
-"%(file)s."
-msgstr ""
-
-#: keystone/cli.py:419
-#, python-format
-msgid "Error parsing configuration file for domain: %(domain)s, file: %(file)s."
-msgstr ""
-
-#: keystone/cli.py:452
-#, python-format
-msgid ""
-"To get a more detailed information on this error, re-run this command for"
-" the specific domain, i.e.: keystone-manage domain_config_upload "
-"--domain-name %s"
-msgstr ""
-
-#: keystone/cli.py:470
-#, python-format
-msgid "Unable to locate domain config directory: %s"
-msgstr "Não é possível localizar diretório de configuração de domínio: %s"
-
-#: keystone/cli.py:503
-msgid ""
-"Unable to access the keystone database, please check it is configured "
-"correctly."
-msgstr ""
-
-#: keystone/exception.py:79
-#, python-format
-msgid ""
-"Expecting to find %(attribute)s in %(target)s - the server could not "
-"comply with the request since it is either malformed or otherwise "
-"incorrect. The client is assumed to be in error."
-msgstr ""
-
-#: keystone/exception.py:90
-#, python-format
-msgid "%(detail)s"
-msgstr ""
-
-#: keystone/exception.py:94
-msgid ""
-"Timestamp not in expected format. The server could not comply with the "
-"request since it is either malformed or otherwise incorrect. The client "
-"is assumed to be in error."
-msgstr ""
-"A data não está no formato especificado. O servidor não pôde realizar a "
-"requisição pois ela está mal formada ou incorreta. Assume-se que o "
-"cliente está com erro."
-
-#: keystone/exception.py:103
#, python-format
-msgid ""
-"String length exceeded.The length of string '%(string)s' exceeded the "
-"limit of column %(type)s(CHAR(%(length)d))."
-msgstr ""
-"Comprimento de string excedido. O comprimento de string '%(string)s' "
-"excedeu o limite da coluna %(type)s(CHAR(%(length)d))."
-
-#: keystone/exception.py:109
-#, python-format
-msgid ""
-"Request attribute %(attribute)s must be less than or equal to %(size)i. "
-"The server could not comply with the request because the attribute size "
-"is invalid (too large). The client is assumed to be in error."
-msgstr ""
-"Atributo de requisição %(attribute)s deve ser menor ou igual a %(size)i. "
-"O servidor não pôde atender a requisição porque o tamanho do atributo é "
-"inválido (muito grande). Assume-se que o cliente está em erro."
-
-#: keystone/exception.py:119
-#, python-format
-msgid ""
-"The specified parent region %(parent_region_id)s would create a circular "
-"region hierarchy."
-msgstr ""
-
-#: keystone/exception.py:126
-#, python-format
-msgid ""
-"The password length must be less than or equal to %(size)i. The server "
-"could not comply with the request because the password is invalid."
-msgstr ""
-
-#: keystone/exception.py:134
-#, python-format
-msgid ""
-"Unable to delete region %(region_id)s because it or its child regions "
-"have associated endpoints."
-msgstr ""
-
-#: keystone/exception.py:141
-msgid ""
-"The certificates you requested are not available. It is likely that this "
-"server does not use PKI tokens otherwise this is the result of "
-"misconfiguration."
-msgstr ""
-
-#: keystone/exception.py:150
-msgid "(Disable debug mode to suppress these details.)"
-msgstr ""
+msgid "%(property_name)s should not be greater than %(max_length)s characters."
+msgstr "%(property_name)s não deve ter mais de %(max_length)s caracteres."
-#: keystone/exception.py:155
#, python-format
-msgid "%(message)s %(amendment)s"
-msgstr ""
-
-#: keystone/exception.py:163
-msgid "The request you have made requires authentication."
-msgstr "A requisição que você fez requer autenticação."
-
-#: keystone/exception.py:169
-msgid "Authentication plugin error."
-msgstr "Erro do plugin de autenticação."
+msgid "%s cannot be empty."
+msgstr "%s não pode estar vazio."
-#: keystone/exception.py:177
-#, python-format
-msgid "Unable to find valid groups while using mapping %(mapping_id)s"
-msgstr ""
+msgid "Access token is expired"
+msgstr "Token de acesso expirou"
-#: keystone/exception.py:182
-msgid "Attempted to authenticate with an unsupported method."
-msgstr "Tentativa de autenticação com um método não suportado."
+msgid "Access token not found"
+msgstr "Token de acesso não encontrado"
-#: keystone/exception.py:190
msgid "Additional authentications steps required."
msgstr "Passos de autenticação adicionais requeridos."
-#: keystone/exception.py:198
-msgid "You are not authorized to perform the requested action."
-msgstr "Você não está autorizado à realizar a ação solicitada."
-
-#: keystone/exception.py:205
-#, python-format
-msgid "You are not authorized to perform the requested action: %(action)s"
-msgstr ""
-
-#: keystone/exception.py:210
-#, python-format
-msgid ""
-"Could not change immutable attribute(s) '%(attributes)s' in target "
-"%(target)s"
-msgstr ""
-
-#: keystone/exception.py:215
-#, python-format
-msgid ""
-"Group membership across backend boundaries is not allowed, group in "
-"question is %(group_id)s, user is %(user_id)s"
-msgstr ""
-
-#: keystone/exception.py:221
-#, python-format
-msgid ""
-"Invalid mix of entities for policy association - only Endpoint, Service "
-"or Region+Service allowed. Request was - Endpoint: %(endpoint_id)s, "
-"Service: %(service_id)s, Region: %(region_id)s"
-msgstr ""
-
-#: keystone/exception.py:228
-#, python-format
-msgid "Invalid domain specific configuration: %(reason)s"
-msgstr ""
-
-#: keystone/exception.py:232
-#, python-format
-msgid "Could not find: %(target)s"
-msgstr ""
-
-#: keystone/exception.py:238
-#, python-format
-msgid "Could not find endpoint: %(endpoint_id)s"
-msgstr ""
-
-#: keystone/exception.py:245
msgid "An unhandled exception has occurred: Could not find metadata."
msgstr "Uma exceção não tratada ocorreu: Não foi possível encontrar metadados."
-#: keystone/exception.py:250
-#, python-format
-msgid "Could not find policy: %(policy_id)s"
-msgstr ""
-
-#: keystone/exception.py:254
-msgid "Could not find policy association"
-msgstr ""
-
-#: keystone/exception.py:258
-#, python-format
-msgid "Could not find role: %(role_id)s"
-msgstr ""
-
-#: keystone/exception.py:262
-#, python-format
-msgid ""
-"Could not find role assignment with role: %(role_id)s, user or group: "
-"%(actor_id)s, project or domain: %(target_id)s"
-msgstr ""
-
-#: keystone/exception.py:268
-#, python-format
-msgid "Could not find region: %(region_id)s"
-msgstr ""
-
-#: keystone/exception.py:272
-#, python-format
-msgid "Could not find service: %(service_id)s"
-msgstr ""
-
-#: keystone/exception.py:276
-#, python-format
-msgid "Could not find domain: %(domain_id)s"
-msgstr ""
-
-#: keystone/exception.py:280
-#, python-format
-msgid "Could not find project: %(project_id)s"
-msgstr ""
-
-#: keystone/exception.py:284
-#, python-format
-msgid "Cannot create project with parent: %(project_id)s"
-msgstr ""
-
-#: keystone/exception.py:288
-#, python-format
-msgid "Could not find token: %(token_id)s"
-msgstr ""
-
-#: keystone/exception.py:292
-#, python-format
-msgid "Could not find user: %(user_id)s"
-msgstr ""
-
-#: keystone/exception.py:296
-#, python-format
-msgid "Could not find group: %(group_id)s"
-msgstr ""
-
-#: keystone/exception.py:300
-#, python-format
-msgid "Could not find mapping: %(mapping_id)s"
-msgstr ""
-
-#: keystone/exception.py:304
-#, python-format
-msgid "Could not find trust: %(trust_id)s"
-msgstr ""
-
-#: keystone/exception.py:308
-#, python-format
-msgid "No remaining uses for trust: %(trust_id)s"
-msgstr ""
-
-#: keystone/exception.py:312
-#, python-format
-msgid "Could not find credential: %(credential_id)s"
-msgstr ""
-
-#: keystone/exception.py:316
-#, python-format
-msgid "Could not find version: %(version)s"
-msgstr ""
-
-#: keystone/exception.py:320
-#, python-format
-msgid "Could not find Endpoint Group: %(endpoint_group_id)s"
-msgstr ""
-
-#: keystone/exception.py:324
-#, python-format
-msgid "Could not find Identity Provider: %(idp_id)s"
-msgstr ""
-
-#: keystone/exception.py:328
-#, python-format
-msgid "Could not find Service Provider: %(sp_id)s"
-msgstr ""
-
-#: keystone/exception.py:332
-#, python-format
-msgid ""
-"Could not find federated protocol %(protocol_id)s for Identity Provider: "
-"%(idp_id)s"
-msgstr ""
-
-#: keystone/exception.py:343
-#, python-format
-msgid ""
-"Could not find %(group_or_option)s in domain configuration for domain "
-"%(domain_id)s"
-msgstr ""
-
-#: keystone/exception.py:348
-#, python-format
-msgid "Conflict occurred attempting to store %(type)s - %(details)s"
-msgstr ""
-
-#: keystone/exception.py:356
-msgid "An unexpected error prevented the server from fulfilling your request."
-msgstr ""
-
-#: keystone/exception.py:359
-#, python-format
-msgid ""
-"An unexpected error prevented the server from fulfilling your request: "
-"%(exception)s"
-msgstr ""
-
-#: keystone/exception.py:382
-#, python-format
-msgid "Unable to consume trust %(trust_id)s, unable to acquire lock."
-msgstr ""
-
-#: keystone/exception.py:387
-msgid ""
-"Expected signing certificates are not available on the server. Please "
-"check Keystone configuration."
-msgstr ""
-
-#: keystone/exception.py:393
-#, python-format
-msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details."
-msgstr ""
-"URL de endpoint mal-formada (%(endpoint)s), veja o log de ERROS para "
-"detalhes."
-
-#: keystone/exception.py:398
-#, python-format
-msgid ""
-"Group %(group_id)s returned by mapping %(mapping_id)s was not found in "
-"the backend."
-msgstr ""
-
-#: keystone/exception.py:403
-#, python-format
-msgid "Error while reading metadata file, %(reason)s"
-msgstr ""
-
-#: keystone/exception.py:407
-#, python-format
-msgid ""
-"Unexpected combination of grant attributes - User: %(user_id)s, Group: "
-"%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s"
-msgstr ""
-
-#: keystone/exception.py:414
-msgid "The action you have requested has not been implemented."
-msgstr "A ação que você solicitou não foi implementada."
-
-#: keystone/exception.py:421
-msgid "The service you have requested is no longer available on this server."
-msgstr ""
-
-#: keystone/exception.py:428
-#, python-format
-msgid "The Keystone configuration file %(config_file)s could not be found."
-msgstr ""
-
-#: keystone/exception.py:433
-msgid ""
-"No encryption keys found; run keystone-manage fernet_setup to bootstrap "
-"one."
-msgstr ""
-
-#: keystone/exception.py:438
-#, python-format
-msgid ""
-"The Keystone domain-specific configuration has specified more than one "
-"SQL driver (only one is permitted): %(source)s."
-msgstr ""
+msgid "Attempted to authenticate with an unsupported method."
+msgstr "Tentativa de autenticação com um método não suportado."
-#: keystone/exception.py:445
-#, python-format
-msgid ""
-"%(mod_name)s doesn't provide database migrations. The migration "
-"repository path at %(path)s doesn't exist or isn't a directory."
-msgstr ""
+msgid "Authentication plugin error."
+msgstr "Erro do plugin de autenticação."
-#: keystone/exception.py:457
#, python-format
-msgid ""
-"Unable to sign SAML assertion. It is likely that this server does not "
-"have xmlsec1 installed, or this is the result of misconfiguration. Reason"
-" %(reason)s"
-msgstr ""
-
-#: keystone/exception.py:465
-msgid ""
-"No Authorization headers found, cannot proceed with OAuth related calls, "
-"if running under HTTPd or Apache, ensure WSGIPassAuthorization is set to "
-"On."
-msgstr ""
+msgid "Cannot change %(option_name)s %(attr)s"
+msgstr "Não é possível alterar %(option_name)s %(attr)s"
-#: keystone/notifications.py:250
-#, python-format
-msgid "%(event)s is not a valid notification event, must be one of: %(actions)s"
-msgstr ""
+msgid "Cannot change consumer secret"
+msgstr "Não é possível alterar segredo do consumidor"
-#: keystone/notifications.py:259
#, python-format
-msgid "Method not callable: %s"
-msgstr ""
-
-#: keystone/assignment/controllers.py:107 keystone/identity/controllers.py:69
-#: keystone/resource/controllers.py:78
-msgid "Name field is required and cannot be empty"
-msgstr "Campo nome é requerido e não pode ser vazio"
-
-#: keystone/assignment/controllers.py:330
-#: keystone/assignment/controllers.py:753
-msgid "Specify a domain or project, not both"
-msgstr "Especifique um domínio ou projeto, não ambos"
-
-#: keystone/assignment/controllers.py:333
-msgid "Specify one of domain or project"
-msgstr ""
-
-#: keystone/assignment/controllers.py:338
-#: keystone/assignment/controllers.py:758
-msgid "Specify a user or group, not both"
-msgstr "Epecifique um usuário ou grupo, não ambos"
-
-#: keystone/assignment/controllers.py:341
-msgid "Specify one of user or group"
-msgstr ""
-
-#: keystone/assignment/controllers.py:742
-msgid "Combining effective and group filter will always result in an empty list."
-msgstr ""
+msgid "Cannot remove role that has not been granted, %s"
+msgstr "Não é possível remover role que não foi concedido, %s"
-#: keystone/assignment/controllers.py:747
-msgid ""
-"Combining effective, domain and inherited filters will always result in "
-"an empty list."
-msgstr ""
+msgid "Consumer not found"
+msgstr "Consumidor não encontrado"
-#: keystone/assignment/core.py:228
-msgid "Must specify either domain or project"
-msgstr ""
+msgid "Could not find role"
+msgstr "Não é possível encontrar role"
-#: keystone/assignment/core.py:493
-#, python-format
-msgid "Project (%s)"
-msgstr "Projeto (%s)"
+msgid "Credential belongs to another user"
+msgstr "A credencial pertence à outro usuário"
-#: keystone/assignment/core.py:495
#, python-format
msgid "Domain (%s)"
msgstr "Domínio (%s)"
-#: keystone/assignment/core.py:497
-msgid "Unknown Target"
-msgstr "Alvo Desconhecido"
-
-#: keystone/assignment/backends/ldap.py:92
-msgid "Domain metadata not supported by LDAP"
-msgstr ""
-
-#: keystone/assignment/backends/ldap.py:381
-#, python-format
-msgid "User %(user_id)s already has role %(role_id)s in tenant %(tenant_id)s"
-msgstr ""
-
-#: keystone/assignment/backends/ldap.py:387
#, python-format
-msgid "Role %s not found"
-msgstr "Role %s não localizada"
-
-#: keystone/assignment/backends/ldap.py:402
-#: keystone/assignment/backends/sql.py:335
-#, python-format
-msgid "Cannot remove role that has not been granted, %s"
-msgstr "Não é possível remover role que não foi concedido, %s"
+msgid "Domain is disabled: %s"
+msgstr "O domínio está desativado: %s"
-#: keystone/assignment/backends/sql.py:356
-#, python-format
-msgid "Unexpected assignment type encountered, %s"
-msgstr ""
+msgid "Domain scoped token is not supported"
+msgstr "O token de escopo de domínio não é suportado"
-#: keystone/assignment/role_backends/ldap.py:61 keystone/catalog/core.py:103
-#: keystone/common/ldap/core.py:1400 keystone/resource/backends/ldap.py:149
#, python-format
msgid "Duplicate ID, %s."
msgstr "ID duplicado, %s."
-#: keystone/assignment/role_backends/ldap.py:69
-#: keystone/common/ldap/core.py:1390
#, python-format
msgid "Duplicate name, %s."
msgstr "Nome duplicado, %s."
-#: keystone/assignment/role_backends/ldap.py:119
-#, python-format
-msgid "Cannot duplicate name %s"
-msgstr ""
-
-#: keystone/auth/controllers.py:60
-#, python-format
-msgid ""
-"Cannot load an auth-plugin by class-name without a \"method\" attribute "
-"defined: %s"
-msgstr ""
-
-#: keystone/auth/controllers.py:71
-#, python-format
-msgid ""
-"Auth plugin %(plugin)s is requesting previously registered method "
-"%(method)s"
-msgstr ""
-
-#: keystone/auth/controllers.py:115
-#, python-format
-msgid ""
-"Unable to reconcile identity attribute %(attribute)s as it has "
-"conflicting values %(new)s and %(old)s"
-msgstr ""
-
-#: keystone/auth/controllers.py:336
-msgid "Scoping to both domain and project is not allowed"
-msgstr "A definição de escopo para o domínio e o projeto não é permitida"
-
-#: keystone/auth/controllers.py:339
-msgid "Scoping to both domain and trust is not allowed"
-msgstr "A definição de escopo para o domínio e a trust não é permitida"
-
-#: keystone/auth/controllers.py:342
-msgid "Scoping to both project and trust is not allowed"
-msgstr "A definição de escopo para o projeto e a trust não é permitida"
-
-#: keystone/auth/controllers.py:512
-msgid "User not found"
-msgstr "Usuário não localizado"
-
-#: keystone/auth/controllers.py:616
-msgid "A project-scoped token is required to produce a service catalog."
-msgstr ""
-
-#: keystone/auth/plugins/external.py:46
-msgid "No authenticated user"
-msgstr "Nenhum usuário autenticado"
-
-#: keystone/auth/plugins/external.py:56
-#, python-format
-msgid "Unable to lookup user %s"
-msgstr "Não é possível consultar o usuário %s"
-
-#: keystone/auth/plugins/external.py:107
-msgid "auth_type is not Negotiate"
-msgstr ""
-
-#: keystone/auth/plugins/mapped.py:244
-msgid "Could not map user"
-msgstr ""
-
-#: keystone/auth/plugins/oauth1.py:39
-#, python-format
-msgid "%s not supported"
-msgstr ""
-
-#: keystone/auth/plugins/oauth1.py:57
-msgid "Access token is expired"
-msgstr "Token de acesso expirou"
-
-#: keystone/auth/plugins/oauth1.py:71
-msgid "Could not validate the access token"
-msgstr ""
-
-#: keystone/auth/plugins/password.py:46
-msgid "Invalid username or password"
-msgstr "Nome de usuário ou senha inválidos"
-
-#: keystone/auth/plugins/token.py:72 keystone/token/controllers.py:160
-msgid "rescope a scoped token"
-msgstr ""
-
-#: keystone/catalog/controllers.py:168
-#, python-format
-msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\""
-msgstr ""
-
-#: keystone/common/authorization.py:47 keystone/common/wsgi.py:64
-#, python-format
-msgid "token reference must be a KeystoneToken type, got: %s"
-msgstr ""
-
-#: keystone/common/base64utils.py:66
-msgid "pad must be single character"
-msgstr ""
-
-#: keystone/common/base64utils.py:215
-#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
-msgstr ""
+msgid "Enabled field must be a boolean"
+msgstr "Campo habilitado precisa ser um booleano"
-#: keystone/common/base64utils.py:219
-#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
-msgstr ""
+msgid "Enabled field should be a boolean"
+msgstr "Campo habilitado deve ser um booleano"
-#: keystone/common/base64utils.py:225
#, python-format
-msgid "text is not a multiple of 4, but contains pad \"%s\""
-msgstr ""
-
-#: keystone/common/base64utils.py:244 keystone/common/base64utils.py:265
-msgid "padded base64url text must be multiple of 4 characters"
-msgstr ""
-
-#: keystone/common/controller.py:237 keystone/token/providers/common.py:589
-msgid "Non-default domain is not supported"
-msgstr "O domínio não padrão não é suportado"
+msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s"
+msgstr "Endpoint %(endpoint_id)s não encontrado no projeto %(project_id)s"
-#: keystone/common/controller.py:305 keystone/identity/core.py:428
-#: keystone/resource/core.py:761 keystone/resource/backends/ldap.py:61
#, python-format
msgid "Expected dict or list: %s"
msgstr "Esperado dict ou list: %s"
-#: keystone/common/controller.py:318
-msgid "Marker could not be found"
-msgstr "Marcador não pôde ser encontrado"
-
-#: keystone/common/controller.py:329
-msgid "Invalid limit value"
-msgstr "Valor limite inválido"
-
-#: keystone/common/controller.py:637
-msgid "Cannot change Domain ID"
-msgstr ""
-
-#: keystone/common/controller.py:666
-msgid "domain_id is required as part of entity"
-msgstr ""
-
-#: keystone/common/controller.py:701
-msgid "A domain-scoped token must be used"
-msgstr ""
-
-#: keystone/common/dependency.py:68
-#, python-format
-msgid "Unregistered dependency: %(name)s for %(targets)s"
-msgstr ""
-
-#: keystone/common/dependency.py:108
-msgid "event_callbacks must be a dict"
-msgstr ""
-
-#: keystone/common/dependency.py:113
-#, python-format
-msgid "event_callbacks[%s] must be a dict"
-msgstr ""
-
-#: keystone/common/pemutils.py:223
-#, python-format
-msgid "unknown pem_type \"%(pem_type)s\", valid types are: %(valid_pem_types)s"
-msgstr ""
-
-#: keystone/common/pemutils.py:242
-#, python-format
-msgid ""
-"unknown pem header \"%(pem_header)s\", valid headers are: "
-"%(valid_pem_headers)s"
-msgstr ""
-
-#: keystone/common/pemutils.py:298
-#, python-format
-msgid "failed to find end matching \"%s\""
-msgstr ""
-
-#: keystone/common/pemutils.py:302
-#, python-format
-msgid ""
-"beginning & end PEM headers do not match (%(begin_pem_header)s!= "
-"%(end_pem_header)s)"
-msgstr ""
-
-#: keystone/common/pemutils.py:377
-#, python-format
-msgid "unknown pem_type: \"%s\""
-msgstr ""
-
-#: keystone/common/pemutils.py:389
-#, python-format
-msgid ""
-"failed to base64 decode %(pem_type)s PEM at position%(position)d: "
-"%(err_msg)s"
-msgstr ""
-
-#: keystone/common/utils.py:164 keystone/credential/controllers.py:44
-msgid "Invalid blob in credential"
-msgstr "BLOB inválido na credencial"
-
-#: keystone/common/wsgi.py:330
-#, python-format
-msgid "%s field is required and cannot be empty"
-msgstr ""
-
-#: keystone/common/wsgi.py:342
-#, python-format
-msgid "%s field(s) cannot be empty"
-msgstr ""
-
-#: keystone/common/wsgi.py:563
-msgid "The resource could not be found."
-msgstr "O recurso não pôde ser localizado."
-
-#: keystone/common/wsgi.py:704
-#, python-format
-msgid "Unexpected status requested for JSON Home response, %s"
-msgstr ""
-
-#: keystone/common/cache/_memcache_pool.py:113
-#, python-format
-msgid "Unable to get a connection from pool id %(id)s after %(seconds)s seconds."
-msgstr ""
-
-#: keystone/common/cache/core.py:132
-msgid "region not type dogpile.cache.CacheRegion"
-msgstr "região não é do tipo dogpile.cache.CacheRegion"
-
-#: keystone/common/cache/backends/mongo.py:231
-msgid "db_hosts value is required"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:236
-msgid "database db_name is required"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:241
-msgid "cache_collection name is required"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:252
-msgid "integer value expected for w (write concern attribute)"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:260
-msgid "replicaset_name required when use_replica is True"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:275
-msgid "integer value expected for mongo_ttl_seconds"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:301
-msgid "no ssl support available"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:310
-#, python-format
-msgid ""
-"Invalid ssl_cert_reqs value of %s, must be one of \"NONE\", \"OPTIONAL\","
-" \"REQUIRED\""
-msgstr ""
-
-#: keystone/common/kvs/core.py:71
-#, python-format
-msgid "Lock Timeout occurred for key, %(target)s"
-msgstr ""
-
-#: keystone/common/kvs/core.py:106
-#, python-format
-msgid "KVS region %s is already configured. Cannot reconfigure."
-msgstr ""
-
-#: keystone/common/kvs/core.py:145
-#, python-format
-msgid "Key Value Store not configured: %s"
-msgstr ""
-
-#: keystone/common/kvs/core.py:198
-msgid "`key_mangler` option must be a function reference"
-msgstr ""
-
-#: keystone/common/kvs/core.py:353
-#, python-format
-msgid "Lock key must match target key: %(lock)s != %(target)s"
-msgstr ""
-
-#: keystone/common/kvs/core.py:357
-msgid "Must be called within an active lock context."
-msgstr ""
-
-#: keystone/common/kvs/backends/memcached.py:69
-#, python-format
-msgid "Maximum lock attempts on %s occurred."
-msgstr ""
-
-#: keystone/common/kvs/backends/memcached.py:108
-#, python-format
-msgid ""
-"Backend `%(driver)s` is not a valid memcached backend. Valid drivers: "
-"%(driver_list)s"
-msgstr ""
-
-#: keystone/common/kvs/backends/memcached.py:178
-msgid "`key_mangler` functions must be callable."
-msgstr ""
-
-#: keystone/common/ldap/core.py:191
-#, python-format
-msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s"
-msgstr ""
+msgid "Failed to validate token"
+msgstr "Falha ao validar token"
-#: keystone/common/ldap/core.py:201
#, python-format
msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
msgstr ""
"Opção de certificado LADP TLS inválida: %(option)s. Escolha uma de: "
"%(options)s"
-#: keystone/common/ldap/core.py:213
+#, python-format
+msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available"
+msgstr "Opção LDAP TLS_AVAIL inválida: %s. TLS não dsponível"
+
#, python-format
msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s"
msgstr "Escopo LDAP inválido: %(scope)s. Escolha um de: %(options)s"
-#: keystone/common/ldap/core.py:588
msgid "Invalid TLS / LDAPS combination"
msgstr "Combinação TLS / LADPS inválida"
-#: keystone/common/ldap/core.py:593
-#, python-format
-msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available"
-msgstr "Opção LDAP TLS_AVAIL inválida: %s. TLS não dsponível"
-
-#: keystone/common/ldap/core.py:603
-#, python-format
-msgid "tls_cacertfile %s not found or is not a file"
-msgstr "tls_cacertfile %s não encontrada ou não é um arquivo"
+msgid "Invalid blob in credential"
+msgstr "BLOB inválido na credencial"
-#: keystone/common/ldap/core.py:615
-#, python-format
-msgid "tls_cacertdir %s not found or is not a directory"
-msgstr "tls_cacertdir %s não encontrado ou não é um diretório"
+msgid "Invalid limit value"
+msgstr "Valor limite inválido"
-#: keystone/common/ldap/core.py:1325
-#, python-format
-msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s"
-msgstr ""
+msgid "Invalid username or password"
+msgstr "Nome de usuário ou senha inválidos"
-#: keystone/common/ldap/core.py:1369
#, python-format
msgid "LDAP %s create"
msgstr "Criação de LDAP %s"
-#: keystone/common/ldap/core.py:1374
-#, python-format
-msgid "LDAP %s update"
-msgstr "Atualização de LDAP %s"
-
-#: keystone/common/ldap/core.py:1379
#, python-format
msgid "LDAP %s delete"
msgstr "Exclusão de LDAP %s"
-#: keystone/common/ldap/core.py:1521
-msgid ""
-"Disabling an entity where the 'enable' attribute is ignored by "
-"configuration."
-msgstr ""
-
-#: keystone/common/ldap/core.py:1532
#, python-format
-msgid "Cannot change %(option_name)s %(attr)s"
-msgstr "Não é possível alterar %(option_name)s %(attr)s"
-
-#: keystone/common/ldap/core.py:1619
-#, python-format
-msgid "Member %(member)s is already a member of group %(group)s"
-msgstr ""
-
-#: keystone/common/sql/core.py:219
-msgid ""
-"Cannot truncate a driver call without hints list as first parameter after"
-" self "
-msgstr ""
-
-#: keystone/common/sql/core.py:410
-msgid "Duplicate Entry"
-msgstr ""
-
-#: keystone/common/sql/core.py:426
-#, python-format
-msgid "An unexpected error occurred when trying to store %s"
-msgstr ""
-
-#: keystone/common/sql/migration_helpers.py:187
-#: keystone/common/sql/migration_helpers.py:245
-#, python-format
-msgid "%s extension does not exist."
-msgstr ""
+msgid "LDAP %s update"
+msgstr "Atualização de LDAP %s"
-#: keystone/common/validation/validators.py:54
#, python-format
-msgid "Invalid input for field '%(path)s'. The value is '%(value)s'."
+msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details."
msgstr ""
+"URL de endpoint mal-formada (%(endpoint)s), veja o log de ERROS para "
+"detalhes."
-#: keystone/contrib/ec2/controllers.py:318
-msgid "Token belongs to another user"
-msgstr "O token pertence à outro usuário"
-
-#: keystone/contrib/ec2/controllers.py:346
-msgid "Credential belongs to another user"
-msgstr "A credencial pertence à outro usuário"
-
-#: keystone/contrib/endpoint_filter/backends/sql.py:69
-#, python-format
-msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s"
-msgstr "Endpoint %(endpoint_id)s não encontrado no projeto %(project_id)s"
+msgid "Marker could not be found"
+msgstr "Marcador não pôde ser encontrado"
-#: keystone/contrib/endpoint_filter/backends/sql.py:180
-msgid "Endpoint Group Project Association not found"
-msgstr ""
+msgid "Name field is required and cannot be empty"
+msgstr "Campo nome é requerido e não pode ser vazio"
-#: keystone/contrib/endpoint_policy/core.py:258
-#, python-format
-msgid "No policy is associated with endpoint %(endpoint_id)s."
-msgstr ""
+msgid "No authenticated user"
+msgstr "Nenhum usuário autenticado"
-#: keystone/contrib/federation/controllers.py:274
-msgid "Missing entity ID from environment"
-msgstr ""
+msgid "No options specified"
+msgstr "Nenhuma opção especificada"
-#: keystone/contrib/federation/controllers.py:282
-msgid "Request must have an origin query parameter"
-msgstr ""
+msgid "Non-default domain is not supported"
+msgstr "O domínio não padrão não é suportado"
-#: keystone/contrib/federation/controllers.py:292
#, python-format
-msgid "%(host)s is not a trusted dashboard host"
-msgstr ""
-
-#: keystone/contrib/federation/controllers.py:333
-msgid "Use a project scoped token when attempting to create a SAML assertion"
-msgstr ""
+msgid "Project (%s)"
+msgstr "Projeto (%s)"
-#: keystone/contrib/federation/idp.py:454
#, python-format
-msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s"
-msgstr ""
-
-#: keystone/contrib/federation/idp.py:521
-msgid "Ensure configuration option idp_entity_id is set."
-msgstr ""
-
-#: keystone/contrib/federation/idp.py:524
-msgid "Ensure configuration option idp_sso_endpoint is set."
-msgstr ""
-
-#: keystone/contrib/federation/idp.py:544
-msgid ""
-"idp_contact_type must be one of: [technical, other, support, "
-"administrative or billing."
-msgstr ""
-
-#: keystone/contrib/federation/utils.py:178
-msgid "Federation token is expired"
-msgstr ""
-
-#: keystone/contrib/federation/utils.py:208
-msgid ""
-"Could not find Identity Provider identifier in environment, check "
-"[federation] remote_id_attribute for details."
-msgstr ""
-
-#: keystone/contrib/federation/utils.py:213
-msgid ""
-"Incoming identity provider identifier not included among the accepted "
-"identifiers."
-msgstr ""
+msgid "Project is disabled: %s"
+msgstr "O projeto está desativado: %s"
-#: keystone/contrib/federation/utils.py:501
-#, python-format
-msgid "User type %s not supported"
-msgstr ""
+msgid "Request Token does not have an authorizing user id"
+msgstr "Token de Requisição não possui um ID de usuário autorizado"
-#: keystone/contrib/federation/utils.py:537
#, python-format
msgid ""
-"Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords "
-"must be specified."
-msgstr ""
-
-#: keystone/contrib/federation/utils.py:753
-#, python-format
-msgid "Identity Provider %(idp)s is disabled"
-msgstr ""
-
-#: keystone/contrib/federation/utils.py:761
-#, python-format
-msgid "Service Provider %(sp)s is disabled"
-msgstr ""
-
-#: keystone/contrib/oauth1/controllers.py:99
-msgid "Cannot change consumer secret"
-msgstr "Não é possível alterar segredo do consumidor"
-
-#: keystone/contrib/oauth1/controllers.py:131
-msgid "Cannot list request tokens with a token issued via delegation."
-msgstr ""
-
-#: keystone/contrib/oauth1/controllers.py:192
-#: keystone/contrib/oauth1/backends/sql.py:270
-msgid "User IDs do not match"
-msgstr "ID de usuário não confere"
-
-#: keystone/contrib/oauth1/controllers.py:199
-msgid "Could not find role"
-msgstr "Não é possível encontrar role"
-
-#: keystone/contrib/oauth1/controllers.py:248
-msgid "Invalid signature"
+"Request attribute %(attribute)s must be less than or equal to %(size)i. The "
+"server could not comply with the request because the attribute size is "
+"invalid (too large). The client is assumed to be in error."
msgstr ""
+"Atributo de requisição %(attribute)s deve ser menor ou igual a %(size)i. O "
+"servidor não pôde atender a requisição porque o tamanho do atributo é "
+"inválido (muito grande). Assume-se que o cliente está em erro."
-#: keystone/contrib/oauth1/controllers.py:299
-#: keystone/contrib/oauth1/controllers.py:377
msgid "Request token is expired"
msgstr "Token de requisição expirou"
-#: keystone/contrib/oauth1/controllers.py:313
-msgid "There should not be any non-oauth parameters"
-msgstr "Não deve haver nenhum parâmetro não oauth"
-
-#: keystone/contrib/oauth1/controllers.py:317
-msgid "provided consumer key does not match stored consumer key"
-msgstr ""
-"Chave de consumidor fornecida não confere com a chave de consumidor "
-"armazenada"
-
-#: keystone/contrib/oauth1/controllers.py:321
-msgid "provided verifier does not match stored verifier"
-msgstr "Verificador fornecido não confere com o verificador armazenado"
-
-#: keystone/contrib/oauth1/controllers.py:325
-msgid "provided request key does not match stored request key"
-msgstr ""
-"Chave de requisição do provedor não confere com a chave de requisição "
-"armazenada"
-
-#: keystone/contrib/oauth1/controllers.py:329
-msgid "Request Token does not have an authorizing user id"
-msgstr "Token de Requisição não possui um ID de usuário autorizado"
-
-#: keystone/contrib/oauth1/controllers.py:366
-msgid "Cannot authorize a request token with a token issued via delegation."
-msgstr ""
-
-#: keystone/contrib/oauth1/controllers.py:396
-msgid "authorizing user does not have role required"
-msgstr "Usuário autorizado não possui o role necessário"
-
-#: keystone/contrib/oauth1/controllers.py:409
-msgid "User is not a member of the requested project"
-msgstr "Usuário não é um membro do projeto requisitado"
-
-#: keystone/contrib/oauth1/backends/sql.py:91
-msgid "Consumer not found"
-msgstr "Consumidor não encontrado"
-
-#: keystone/contrib/oauth1/backends/sql.py:186
msgid "Request token not found"
msgstr "Token de requisição não encontrado"
-#: keystone/contrib/oauth1/backends/sql.py:250
-msgid "Access token not found"
-msgstr "Token de acesso não encontrado"
-
-#: keystone/contrib/revoke/controllers.py:33
-#, python-format
-msgid "invalid date format %s"
-msgstr ""
-
-#: keystone/contrib/revoke/core.py:150
-msgid ""
-"The revoke call must not have both domain_id and project_id. This is a "
-"bug in the Keystone server. The current request is aborted."
-msgstr ""
-
-#: keystone/contrib/revoke/core.py:218 keystone/token/provider.py:207
-#: keystone/token/provider.py:230 keystone/token/provider.py:296
-#: keystone/token/provider.py:303
-msgid "Failed to validate token"
-msgstr "Falha ao validar token"
-
-#: keystone/identity/controllers.py:72
-msgid "Enabled field must be a boolean"
-msgstr "Campo habilitado precisa ser um booleano"
-
-#: keystone/identity/controllers.py:98
-msgid "Enabled field should be a boolean"
-msgstr "Campo habilitado deve ser um booleano"
-
-#: keystone/identity/core.py:112
-#, python-format
-msgid "Database at /domains/%s/config"
-msgstr ""
-
-#: keystone/identity/core.py:287 keystone/identity/backends/ldap.py:59
-#: keystone/identity/backends/ldap.py:61 keystone/identity/backends/ldap.py:67
-#: keystone/identity/backends/ldap.py:69 keystone/identity/backends/sql.py:104
-#: keystone/identity/backends/sql.py:106
-msgid "Invalid user / password"
-msgstr ""
-
-#: keystone/identity/core.py:693
-#, python-format
-msgid "User is disabled: %s"
-msgstr "O usuário está desativado: %s"
-
-#: keystone/identity/core.py:735
-msgid "Cannot change user ID"
-msgstr ""
-
-#: keystone/identity/backends/ldap.py:99
-msgid "Cannot change user name"
-msgstr ""
-
-#: keystone/identity/backends/ldap.py:188 keystone/identity/backends/sql.py:188
-#: keystone/identity/backends/sql.py:206
#, python-format
-msgid "User '%(user_id)s' not found in group '%(group_id)s'"
-msgstr ""
-
-#: keystone/identity/backends/ldap.py:339
-#, python-format
-msgid "User %(user_id)s is already a member of group %(group_id)s"
-msgstr "Usuário %(user_id)s já é membro do grupo %(group_id)s"
-
-#: keystone/models/token_model.py:61
-msgid "Found invalid token: scoped to both project and domain."
-msgstr ""
+msgid "Role %s not found"
+msgstr "Role %s não localizada"
-#: keystone/openstack/common/versionutils.py:108
-#, python-format
-msgid ""
-"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and "
-"may be removed in %(remove_in)s."
-msgstr ""
-"%(what)s está deprecado desde %(as_of)s em favor de %(in_favor_of)s e "
-"pode ser removido em %(remove_in)s."
+msgid "Scoping to both domain and project is not allowed"
+msgstr "A definição de escopo para o domínio e o projeto não é permitida"
-#: keystone/openstack/common/versionutils.py:112
-#, python-format
-msgid ""
-"%(what)s is deprecated as of %(as_of)s and may be removed in "
-"%(remove_in)s. It will not be superseded."
-msgstr ""
-"%(what)s está deprecado desde %(as_of)s e pode ser removido em "
-"%(remove_in)s. Ele não será substituído."
+msgid "Scoping to both domain and trust is not allowed"
+msgstr "A definição de escopo para o domínio e a trust não é permitida"
-#: keystone/openstack/common/versionutils.py:116
-#, python-format
-msgid "%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s."
-msgstr ""
+msgid "Scoping to both project and trust is not allowed"
+msgstr "A definição de escopo para o projeto e a trust não é permitida"
-#: keystone/openstack/common/versionutils.py:119
-#, python-format
-msgid "%(what)s is deprecated as of %(as_of)s. It will not be superseded."
-msgstr ""
+msgid "Specify a domain or project, not both"
+msgstr "Especifique um domínio ou projeto, não ambos"
-#: keystone/openstack/common/versionutils.py:241
-#, python-format
-msgid "Deprecated: %s"
-msgstr "Deprecado: %s"
+msgid "Specify a user or group, not both"
+msgstr "Epecifique um usuário ou grupo, não ambos"
-#: keystone/openstack/common/versionutils.py:259
#, python-format
-msgid "Fatal call to deprecated config: %(msg)s"
-msgstr "Chamada fatal à configuração deprecada: %(msg)s"
-
-#: keystone/resource/controllers.py:231
-msgid ""
-"Cannot use parents_as_list and parents_as_ids query params at the same "
-"time."
-msgstr ""
-
-#: keystone/resource/controllers.py:237
msgid ""
-"Cannot use subtree_as_list and subtree_as_ids query params at the same "
-"time."
-msgstr ""
-
-#: keystone/resource/core.py:80
-#, python-format
-msgid "max hierarchy depth reached for %s branch."
-msgstr ""
-
-#: keystone/resource/core.py:97
-msgid "cannot create a project within a different domain than its parents."
+"String length exceeded.The length of string '%(string)s' exceeded the limit "
+"of column %(type)s(CHAR(%(length)d))."
msgstr ""
+"Comprimento de string excedido. O comprimento de string '%(string)s' excedeu "
+"o limite da coluna %(type)s(CHAR(%(length)d))."
-#: keystone/resource/core.py:101
-#, python-format
-msgid "cannot create a project in a branch containing a disabled project: %s"
-msgstr ""
-
-#: keystone/resource/core.py:123
-#, python-format
-msgid "Domain is disabled: %s"
-msgstr "O domínio está desativado: %s"
-
-#: keystone/resource/core.py:141
-#, python-format
-msgid "Domain cannot be named %s"
-msgstr ""
-
-#: keystone/resource/core.py:144
-#, python-format
-msgid "Domain cannot have ID %s"
-msgstr ""
-
-#: keystone/resource/core.py:156
-#, python-format
-msgid "Project is disabled: %s"
-msgstr "O projeto está desativado: %s"
-
-#: keystone/resource/core.py:176
-#, python-format
-msgid "cannot enable project %s since it has disabled parents"
-msgstr ""
-
-#: keystone/resource/core.py:184
-#, python-format
-msgid "cannot disable project %s since its subtree contains enabled projects"
-msgstr ""
-
-#: keystone/resource/core.py:195
-msgid "Update of `parent_id` is not allowed."
-msgstr ""
-
-#: keystone/resource/core.py:222
-#, python-format
-msgid "cannot delete the project %s since it is not a leaf in the hierarchy."
-msgstr ""
-
-#: keystone/resource/core.py:376
-msgid "Multiple domains are not supported"
-msgstr ""
-
-#: keystone/resource/core.py:429
-msgid "delete the default domain"
-msgstr ""
-
-#: keystone/resource/core.py:440
-msgid "cannot delete a domain that is enabled, please disable it first."
-msgstr ""
-
-#: keystone/resource/core.py:841
-msgid "No options specified"
-msgstr "Nenhuma opção especificada"
+msgid "The action you have requested has not been implemented."
+msgstr "A ação que você solicitou não foi implementada."
-#: keystone/resource/core.py:847
-#, python-format
-msgid ""
-"The value of group %(group)s specified in the config should be a "
-"dictionary of options"
-msgstr ""
+msgid "The request you have made requires authentication."
+msgstr "A requisição que você fez requer autenticação."
-#: keystone/resource/core.py:871
-#, python-format
-msgid ""
-"Option %(option)s found with no group specified while checking domain "
-"configuration request"
-msgstr ""
+msgid "The resource could not be found."
+msgstr "O recurso não pôde ser localizado."
-#: keystone/resource/core.py:878
-#, python-format
-msgid "Group %(group)s is not supported for domain specific configurations"
-msgstr ""
+msgid "There should not be any non-oauth parameters"
+msgstr "Não deve haver nenhum parâmetro não oauth"
-#: keystone/resource/core.py:885
-#, python-format
msgid ""
-"Option %(option)s in group %(group)s is not supported for domain specific"
-" configurations"
-msgstr ""
-
-#: keystone/resource/core.py:938
-msgid "An unexpected error occurred when retrieving domain configs"
-msgstr ""
-
-#: keystone/resource/core.py:1013 keystone/resource/core.py:1097
-#: keystone/resource/core.py:1167 keystone/resource/config_backends/sql.py:70
-#, python-format
-msgid "option %(option)s in group %(group)s"
+"Timestamp not in expected format. The server could not comply with the "
+"request since it is either malformed or otherwise incorrect. The client is "
+"assumed to be in error."
msgstr ""
+"A data não está no formato especificado. O servidor não pôde realizar a "
+"requisição pois ela está mal formada ou incorreta. Assume-se que o cliente "
+"está com erro."
-#: keystone/resource/core.py:1016 keystone/resource/core.py:1102
-#: keystone/resource/core.py:1163
-#, python-format
-msgid "group %(group)s"
-msgstr ""
+msgid "Token belongs to another user"
+msgstr "O token pertence à outro usuário"
-#: keystone/resource/core.py:1018
-msgid "any options"
-msgstr ""
+msgid "Token does not belong to specified tenant."
+msgstr "O token não pertence ao tenant especificado."
-#: keystone/resource/core.py:1062
-#, python-format
-msgid ""
-"Trying to update option %(option)s in group %(group)s, so that, and only "
-"that, option must be specified in the config"
-msgstr ""
+msgid "Trustee has no delegated roles."
+msgstr "Fiador não possui roles delegados."
-#: keystone/resource/core.py:1067
-#, python-format
-msgid ""
-"Trying to update group %(group)s, so that, and only that, group must be "
-"specified in the config"
-msgstr ""
+msgid "Trustor is disabled."
+msgstr "O fiador está desativado."
-#: keystone/resource/core.py:1076
#, python-format
-msgid ""
-"request to update group %(group)s, but config provided contains group "
-"%(group_other)s instead"
-msgstr ""
+msgid "Unable to locate domain config directory: %s"
+msgstr "Não é possível localizar diretório de configuração de domínio: %s"
-#: keystone/resource/core.py:1083
#, python-format
-msgid ""
-"Trying to update option %(option)s in group %(group)s, but config "
-"provided contains option %(option_other)s instead"
-msgstr ""
-
-#: keystone/resource/backends/ldap.py:151
-#: keystone/resource/backends/ldap.py:159
-#: keystone/resource/backends/ldap.py:163
-msgid "Domains are read-only against LDAP"
-msgstr ""
+msgid "Unable to lookup user %s"
+msgstr "Não é possível consultar o usuário %s"
-#: keystone/server/eventlet.py:77
-msgid ""
-"Running keystone via eventlet is deprecated as of Kilo in favor of "
-"running in a WSGI server (e.g. mod_wsgi). Support for keystone under "
-"eventlet will be removed in the \"M\"-Release."
-msgstr ""
+msgid "Unable to sign token."
+msgstr "Não é possível assinar o token."
-#: keystone/server/eventlet.py:90
-#, python-format
-msgid "Failed to start the %(name)s server"
-msgstr ""
+msgid "Unknown Target"
+msgstr "Alvo Desconhecido"
-#: keystone/token/controllers.py:391
#, python-format
msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
msgstr "Usuário %(u_id)s não está autorizado para o tenant %(t_id)s"
-#: keystone/token/controllers.py:410 keystone/token/controllers.py:413
-msgid "Token does not belong to specified tenant."
-msgstr "O token não pertence ao tenant especificado."
-
-#: keystone/token/persistence/backends/kvs.py:133
#, python-format
-msgid "Unknown token version %s"
-msgstr ""
+msgid "User %(user_id)s has no access to domain %(domain_id)s"
+msgstr "O usuário %(user_id)s não tem acesso ao domínio %(domain_id)s"
-#: keystone/token/providers/common.py:250
-#: keystone/token/providers/common.py:355
#, python-format
msgid "User %(user_id)s has no access to project %(project_id)s"
msgstr "O usuário %(user_id)s não tem acesso ao projeto %(project_id)s"
-#: keystone/token/providers/common.py:255
-#: keystone/token/providers/common.py:360
#, python-format
-msgid "User %(user_id)s has no access to domain %(domain_id)s"
-msgstr "O usuário %(user_id)s não tem acesso ao domínio %(domain_id)s"
-
-#: keystone/token/providers/common.py:282
-msgid "Trustor is disabled."
-msgstr "O fiador está desativado."
+msgid "User %(user_id)s is already a member of group %(group_id)s"
+msgstr "Usuário %(user_id)s já é membro do grupo %(group_id)s"
-#: keystone/token/providers/common.py:346
-msgid "Trustee has no delegated roles."
-msgstr "Fiador não possui roles delegados."
+msgid "User IDs do not match"
+msgstr "ID de usuário não confere"
-#: keystone/token/providers/common.py:407
#, python-format
-msgid "Invalid audit info data type: %(data)s (%(type)s)"
-msgstr ""
+msgid "User is disabled: %s"
+msgstr "O usuário está desativado: %s"
+
+msgid "User is not a member of the requested project"
+msgstr "Usuário não é um membro do projeto requisitado"
-#: keystone/token/providers/common.py:435
msgid "User is not a trustee."
msgstr "Usuário não é confiável."
-#: keystone/token/providers/common.py:579
-msgid ""
-"Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 "
-"Authentication"
-msgstr ""
-
-#: keystone/token/providers/common.py:597
-msgid "Domain scoped token is not supported"
-msgstr "O token de escopo de domínio não é suportado"
-
-#: keystone/token/providers/pki.py:48 keystone/token/providers/pkiz.py:30
-msgid "Unable to sign token."
-msgstr "Não é possível assinar o token."
+msgid "User not found"
+msgstr "Usuário não localizado"
-#: keystone/token/providers/fernet/core.py:215
-msgid ""
-"This is not a v2.0 Fernet token. Use v3 for trust, domain, or federated "
-"tokens."
-msgstr ""
+msgid "You are not authorized to perform the requested action."
+msgstr "Você não está autorizado à realizar a ação solicitada."
-#: keystone/token/providers/fernet/token_formatters.py:189
-#, python-format
-msgid "This is not a recognized Fernet payload version: %s"
-msgstr ""
+msgid "authorizing user does not have role required"
+msgstr "Usuário autorizado não possui o role necessário"
-#: keystone/trust/controllers.py:148
-msgid "Redelegation allowed for delegated by trust only"
+msgid "provided consumer key does not match stored consumer key"
msgstr ""
+"Chave de consumidor fornecida não confere com a chave de consumidor "
+"armazenada"
-#: keystone/trust/controllers.py:181
-msgid "The authenticated user should match the trustor."
+msgid "provided request key does not match stored request key"
msgstr ""
+"Chave de requisição do provedor não confere com a chave de requisição "
+"armazenada"
-#: keystone/trust/controllers.py:186
-msgid "At least one role should be specified."
-msgstr ""
+msgid "provided verifier does not match stored verifier"
+msgstr "Verificador fornecido não confere com o verificador armazenado"
-#: keystone/trust/core.py:57
-#, python-format
-msgid ""
-"Remaining redelegation depth of %(redelegation_depth)d out of allowed "
-"range of [0..%(max_count)d]"
-msgstr ""
+msgid "region not type dogpile.cache.CacheRegion"
+msgstr "região não é do tipo dogpile.cache.CacheRegion"
-#: keystone/trust/core.py:66
#, python-format
-msgid ""
-"Field \"remaining_uses\" is set to %(value)s while it must not be set in "
-"order to redelegate a trust"
-msgstr ""
-
-#: keystone/trust/core.py:77
-msgid "Requested expiration time is more than redelegated trust can provide"
-msgstr ""
-
-#: keystone/trust/core.py:87
-msgid "Some of requested roles are not in redelegated trust"
-msgstr ""
-
-#: keystone/trust/core.py:116
-msgid "One of the trust agents is disabled or deleted"
-msgstr ""
-
-#: keystone/trust/core.py:135
-msgid "remaining_uses must be a positive integer or null."
-msgstr ""
+msgid "tls_cacertdir %s not found or is not a directory"
+msgstr "tls_cacertdir %s não encontrado ou não é um diretório"
-#: keystone/trust/core.py:141
#, python-format
-msgid ""
-"Requested redelegation depth of %(requested_count)d is greater than "
-"allowed %(max_count)d"
-msgstr ""
-
-#: keystone/trust/core.py:147
-msgid "remaining_uses must not be set if redelegation is allowed"
-msgstr ""
-
-#: keystone/trust/core.py:157
-msgid ""
-"Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting"
-" this parameter is advised."
-msgstr ""
-
+msgid "tls_cacertfile %s not found or is not a file"
+msgstr "tls_cacertfile %s não encontrada ou não é um arquivo"
diff --git a/keystone-moon/keystone/locale/ru/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/ru/LC_MESSAGES/keystone-log-critical.po
index f8d060b3..4ec0cb4b 100644
--- a/keystone-moon/keystone/locale/ru/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/ru/LC_MESSAGES/keystone-log-critical.po
@@ -1,5 +1,5 @@
# Translations template for keystone.
-# Copyright (C) 2014 OpenStack Foundation
+# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
# Translators:
@@ -7,20 +7,20 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
"PO-Revision-Date: 2014-08-31 15:19+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Russian (http://www.transifex.com/projects/p/keystone/"
-"language/ru/)\n"
+"Language-Team: Russian (http://www.transifex.com/openstack/keystone/language/"
+"ru/)\n"
"Language: ru\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
-"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n"
-"%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n"
+"Generated-By: Babel 2.0\n"
+"Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n"
+"%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n"
+"%100>=11 && n%100<=14)? 2 : 3);\n"
-#: keystone/catalog/backends/templated.py:106
#, python-format
msgid "Unable to open template file %s"
msgstr "Не удается открыть файл шаблона %s"
diff --git a/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-critical.po
new file mode 100644
index 00000000..7d486e84
--- /dev/null
+++ b/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-critical.po
@@ -0,0 +1,24 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
+"PO-Revision-Date: 2015-08-04 13:49+0000\n"
+"Last-Translator: İşbaran Akçayır <isbaran@gmail.com>\n"
+"Language-Team: Turkish (Turkey) (http://www.transifex.com/openstack/keystone/"
+"language/tr_TR/)\n"
+"Language: tr_TR\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.0\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+
+#, python-format
+msgid "Unable to open template file %s"
+msgstr "%s şablon dosyası açılamıyor"
diff --git a/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-error.po b/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-error.po
new file mode 100644
index 00000000..cded46bb
--- /dev/null
+++ b/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-error.po
@@ -0,0 +1,163 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
+"PO-Revision-Date: 2015-08-04 13:50+0000\n"
+"Last-Translator: İşbaran Akçayır <isbaran@gmail.com>\n"
+"Language-Team: Turkish (Turkey) (http://www.transifex.com/openstack/keystone/"
+"language/tr_TR/)\n"
+"Language: tr_TR\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.0\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+
+msgid "Cannot retrieve Authorization headers"
+msgstr "Yetkilendirme başlıkları alınamıyor"
+
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in projects hierarchy - "
+"%(project_id)s."
+msgstr ""
+"Proje sıra düzeninde çember başvuru ya da tekrar eden girdi bulundu - "
+"%(project_id)s."
+
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in region tree - %(region_id)s."
+msgstr ""
+"Bölge ağacında çember başvuru ya da tekrar eden girdi bulundu - "
+"%(region_id)s."
+
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found projects hierarchy - "
+"%(project_id)s."
+msgstr ""
+"Proje sıra düzeninde çember başvuru ya da tekrar eden girdi bulundu - "
+"%(project_id)s."
+
+#, python-format
+msgid "Command %(to_exec)s exited with %(retcode)s- %(output)s"
+msgstr "%(to_exec)s komutu %(retcode)s ile çıktı- %(output)s"
+
+#, python-format
+msgid "Could not bind to %(host)s:%(port)s"
+msgstr "%(host)s:%(port)s adresine bağlanılamadı"
+
+#, python-format
+msgid ""
+"Either [fernet_tokens] key_repository does not exist or Keystone does not "
+"have sufficient permission to access it: %s"
+msgstr ""
+"[fernet_tokens] key_repository mevcut değil ya da Keystone erişmek için "
+"yeterli izine sahip değil: %s"
+
+msgid ""
+"Error setting up the debug environment. Verify that the option --debug-url "
+"has the format <host>:<port> and that a debugger processes is listening on "
+"that port."
+msgstr ""
+"Hata ayıklama ortamının ayarlanmasında hata. --debug-url seçeneğinin "
+"<istemci>:<bağlantı noktası> biçimine sahip olduğunu ve bu bağlantı "
+"noktasında hata ayıklama sürecinin dinlediğini doğrulayın."
+
+#, python-format
+msgid "Error when signing assertion, reason: %(reason)s"
+msgstr "Teyit imzalanırken hata, sebep: %(reason)s"
+
+msgid "Failed to construct notifier"
+msgstr "Bildirici inşa etme başarısız"
+
+msgid ""
+"Failed to create [fernet_tokens] key_repository: either it already exists or "
+"you don't have sufficient permissions to create it"
+msgstr ""
+"[fernet_tokens] key_repository oluşturulamıyor: ya zaten mevcut ya da "
+"oluşturmak için yeterli izniniz yok"
+
+#, python-format
+msgid "Failed to remove file %(file_path)r: %(error)s"
+msgstr "%(file_path)r dosyası silinemedi: %(error)s"
+
+#, python-format
+msgid "Failed to send %(action)s %(event_type)s notification"
+msgstr "%(action)s %(event_type)s bildirimi gönderilemedi"
+
+#, python-format
+msgid "Failed to send %(res_id)s %(event_type)s notification"
+msgstr "%(res_id)s %(event_type)s bildirimi gönderilemedi"
+
+msgid "Failed to validate token"
+msgstr "Jeton doğrulama başarısız"
+
+#, python-format
+msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s"
+msgstr "Kusurlu bitiş noktası %(url)s - bilinmeyen anahtar %(keyerror)s"
+
+#, python-format
+msgid ""
+"Malformed endpoint %s - incomplete format (are you missing a type notifier ?)"
+msgstr ""
+"Kusurlu bitiş noktası %s - tamamlanmamış biçim (bir tür bildiriciniz eksik "
+"olabilir mi ?)"
+
+#, python-format
+msgid ""
+"Malformed endpoint '%(url)s'. The following type error occurred during "
+"string substitution: %(typeerror)s"
+msgstr ""
+"Kusurlu bitiş noktası '%(url)s'. Karakter dizisi yer değiştirme sırasında şu "
+"tür hatası oluştu: %(typeerror)s"
+
+#, python-format
+msgid "Malformed endpoint - %(url)r is not a string"
+msgstr "Kusurlu bitiş noktası - %(url)r bir karakter dizisi değil"
+
+#, python-format
+msgid ""
+"Reinitializing revocation list due to error in loading revocation list from "
+"backend. Expected `list` type got `%(type)s`. Old revocation list data: "
+"%(list)r"
+msgstr ""
+"Arka uçtan feshetme listesi yüklemedeki hata sebebiyle fesih listesi yeniden "
+"ilklendiriliyor. `list` beklendi `%(type)s` alındı. Eski fesih listesi "
+"verisi: %(list)r"
+
+msgid "Server error"
+msgstr "Sunucu hatası"
+
+#, python-format
+msgid ""
+"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
+"Skipping unknown format: %s"
+msgstr ""
+"Zula yapılandırma anahtarı inşa edilemiyor. Beklenen biçim \"<değişken ismi>:"
+"<değer>\". Bilinmeyen biçim atlanıyor: %s"
+
+#, python-format
+msgid "Unable to convert Keystone user or group ID. Error: %s"
+msgstr "Keystone kullanıcı veya grup kimliği dönüştürülemiyor. Hata: %s"
+
+msgid "Unable to sign token"
+msgstr "Jeton imzalanamıyor"
+
+#, python-format
+msgid "Unexpected error or malformed token determining token expiry: %s"
+msgstr "Jeton sona erme belirlemede beklenmeyen hata veya kusurlu jeton: %s"
+
+#, python-format
+msgid ""
+"Unexpected results in response for domain config - %(count)s responses, "
+"first option is %(option)s, expected option %(expected)s"
+msgstr ""
+"Alan yapılandırması yanıtında beklenmedik sonuçlar - %(count)s yanıt, ilk "
+"seçenek %(option)s, beklenen seçenek %(expected)s"
diff --git a/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-info.po b/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-info.po
new file mode 100644
index 00000000..5b6da88f
--- /dev/null
+++ b/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-info.po
@@ -0,0 +1,130 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
+"PO-Revision-Date: 2015-08-04 13:49+0000\n"
+"Last-Translator: İşbaran Akçayır <isbaran@gmail.com>\n"
+"Language-Team: Turkish (Turkey) (http://www.transifex.com/openstack/keystone/"
+"language/tr_TR/)\n"
+"Language: tr_TR\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.0\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+
+#, python-format
+msgid ""
+"\"expires_at\" has conflicting values %(existing)s and %(new)s. Will use "
+"the earliest value."
+msgstr ""
+"\"expires_at\" çatışan değerlere sahip %(existing)s ve %(new)s. İlk değer "
+"kullanılacak."
+
+#, python-format
+msgid "Adding proxy '%(proxy)s' to KVS %(name)s."
+msgstr "'%(proxy)s' vekili KVS %(name)s'e ekleniyor."
+
+#, python-format
+msgid "Couldn't verify unknown bind: {%(bind_type)s: %(identifier)s}"
+msgstr "Bilinmeyen bağ doğrulanamıyor: {%(bind_type)s: %(identifier)s}"
+
+#, python-format
+msgid "Created a new key: %s"
+msgstr "Yeni bir anahtar oluşturuldu: %s"
+
+#, python-format
+msgid "Creating the default role %s because it does not exist."
+msgstr "Varsayılan rol %s oluşturuluyor çünkü mevcut değil."
+
+#, python-format
+msgid "Creating the default role %s failed because it was already created"
+msgstr "Varsayılan rol %s oluşturma başarısız çünkü zaten oluşturulmuş"
+
+#, python-format
+msgid "Current primary key is: %s"
+msgstr "Mevcut birincil anahtar: %s"
+
+#, python-format
+msgid ""
+"Fernet token created with length of %d characters, which exceeds 255 "
+"characters"
+msgstr ""
+"Fernet jetonu %d karakter uzunluğunda oluşturuldu, bu 255 karakteri geçiyor"
+
+#, python-format
+msgid "KVS region %s key_mangler disabled."
+msgstr "KVS bölgesi %s key_mangler kapalı."
+
+msgid "Kerberos bind authentication successful"
+msgstr "Kerberos bağ kimlik doğrulama başarılı"
+
+msgid "Kerberos credentials do not match those in bind"
+msgstr "Kerberos kimlik bilgileri bağda olanlarla eşleşmiyor"
+
+msgid "Kerberos credentials required and not present"
+msgstr "Kerberos kimlik bilgileri gerekli ve mevcut değil"
+
+msgid "Key repository is already initialized; aborting."
+msgstr "Anahtar deposu zaten ilklendirilmiş; iptal ediliyor."
+
+#, python-format
+msgid "Named bind mode %s not in bind information"
+msgstr "Adlandırılmış bağlama kipi %s bağlama bilgisinde değil"
+
+#, python-format
+msgid "Next primary key will be: %s"
+msgstr "Sonraki birincil anahtar şu olacak: %s"
+
+msgid "No bind information present in token"
+msgstr "Jetonda bağlama bilgisi yok"
+
+#, python-format
+msgid "Promoted key 0 to be the primary: %s"
+msgstr "Anahtar 0 birincil anahtarlığa yükseltildi: %s"
+
+#, python-format
+msgid ""
+"Received the following notification: service %(service)s, resource_type: "
+"%(resource_type)s, operation %(operation)s payload %(payload)s"
+msgstr ""
+"Şu bildirim alındı: servis %(service)s, kaynak_türü: %(resource_type)s, "
+"işlem %(operation)s faydalı yük %(payload)s"
+
+#, python-format
+msgid "Running command - %s"
+msgstr "Komut çalıştırılıyor - %s"
+
+#, python-format
+msgid "Starting %(arg0)s on %(host)s:%(port)s"
+msgstr "%(host)s:%(port)s üzerinde %(arg0)s başlatılıyor"
+
+#, python-format
+msgid "Starting key rotation with %(count)s key files: %(list)s"
+msgstr "Anahtar dönüşümü %(count)s anahtar dosyasıyla başlatılıyor: %(list)s"
+
+#, python-format
+msgid "Total expired tokens removed: %d"
+msgstr "Toplam süresi dolmuş jetonlar kaldırıldı: %d"
+
+#, python-format
+msgid "Using %(func)s as KVS region %(name)s key_mangler"
+msgstr "%(func)s KVS bölgesi %(name)s key_mangler olarak kullanılıyor"
+
+#, python-format
+msgid "Using default dogpile sha1_mangle_key as KVS region %s key_mangler"
+msgstr ""
+"Varsayılan dogpile sha1_mangle_key KVS bölgesi %s key_mangler olarak "
+"kullanılıyor"
+
+msgid ""
+"[fernet_tokens] key_repository does not appear to exist; attempting to "
+"create it"
+msgstr ""
+"[fernet_tokens] key_repository var gibi görünmüyor; oluşturmaya çalışılıyor"
diff --git a/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-warning.po b/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-warning.po
new file mode 100644
index 00000000..1fda963e
--- /dev/null
+++ b/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-warning.po
@@ -0,0 +1,249 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-08-16 06:06+0000\n"
+"PO-Revision-Date: 2015-08-11 08:29+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Turkish (Turkey) (http://www.transifex.com/openstack/keystone/"
+"language/tr_TR/)\n"
+"Language: tr_TR\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.0\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+
+#, python-format
+msgid "%s is not a dogpile.proxy.ProxyBackend"
+msgstr "%s dogpile.proxy.ProxyBackend değil"
+
+msgid "Auth context already exists in the request environment"
+msgstr "Yetki içeriği istenen ortamda zaten var"
+
+#, python-format
+msgid "Authorization failed. %(exception)s from %(remote_addr)s"
+msgstr "Yetkilendirme başarısız. %(remote_addr)s den %(exception)s"
+
+#, python-format
+msgid ""
+"Endpoint %(endpoint_id)s referenced in association for policy %(policy_id)s "
+"not found."
+msgstr ""
+"%(policy_id)s ile ilişkisi için başvurulan bitiş noktası %(endpoint_id)s "
+"bulunamadı."
+
+msgid "Failed to invoke ``openssl version``, assuming is v1.0 or newer"
+msgstr ""
+"``openssl version`` çalıştırılamadı, v1.0 ya da daha yeni olarak varsayılıyor"
+
+#, python-format
+msgid ""
+"Found multiple domains being mapped to a driver that does not support that "
+"(e.g. LDAP) - Domain ID: %(domain)s, Default Driver: %(driver)s"
+msgstr ""
+"Bunu desteklemeyen bir sürücüye eşleştirilen birden fazla alan bulundu (örn. "
+"LDAP) - Alan ID: %(domain)s, Varsayılan Sürücü: %(driver)s"
+
+#, python-format
+msgid ""
+"Found what looks like an incorrectly constructed config option substitution "
+"reference - domain: %(domain)s, group: %(group)s, option: %(option)s, value: "
+"%(value)s."
+msgstr ""
+"Düzgün inşa edilmemiş yapılandırma seçeneği yer değiştirme referansına "
+"benzeyen bir şey bulundu - alan: %(domain)s, grup: %(group)s, seçenek: "
+"%(option)s, değer: %(value)s."
+
+#, python-format
+msgid ""
+"Found what looks like an unmatched config option substitution reference - "
+"domain: %(domain)s, group: %(group)s, option: %(option)s, value: %(value)s. "
+"Perhaps the config option to which it refers has yet to be added?"
+msgstr ""
+"Eşleşmemiş yapılandırma seçeneği yer değiştirme referansı gibi görünen bir "
+"şey bulundu - alan: %(domain)s, grup: %(group)s, seçenek: %(option)s, değer: "
+"%(value)s. Belki başvurduğu yapılandırma seçeneği henüz eklenmemiştir?"
+
+#, python-format
+msgid "Ignoring file (%s) while scanning domain config directory"
+msgstr "Alan yapılandırma dizini taranırken dosya (%s) atlanıyor"
+
+msgid "Ignoring user name"
+msgstr "Kullanıcı adı atlanıyor"
+
+#, python-format
+msgid ""
+"Invalid additional attribute mapping: \"%s\". Format must be "
+"<ldap_attribute>:<keystone_attribute>"
+msgstr ""
+"Geçersiz ek öznitelik eşleştirmesi: \"%s\". Biçim <ldap_attribute>:"
+"<keystone_attribute> olmalı"
+
+#, python-format
+msgid "Invalid domain name (%s) found in config file name"
+msgstr "Yapılandırma dosyası isminde geçersiz alan adı (%s) bulundu"
+
+msgid ""
+"It is recommended to only use the base key-value-store implementation for "
+"the token driver for testing purposes. Please use 'memcache' or 'sql' "
+"instead."
+msgstr ""
+"Jeton sürücüsü için temel anahtar-değer-depolama uygulamasının yalnızca test "
+"amaçlı kullanımı önerilir. Lütfen 'memcache' ya da 'sql' kullanın."
+
+#, python-format
+msgid "KVS lock released (timeout reached) for: %s"
+msgstr "KVS kilidi kaldırıldı (zaman aşımına uğradı): %s"
+
+msgid ""
+"LDAP Server does not support paging. Disable paging in keystone.conf to "
+"avoid this message."
+msgstr ""
+"LDAP Sunucu sayfalamayı desteklemiyor. Bu iletiyi almamak için sayfalamayı "
+"keystone.conf'da kapatın."
+
+msgid "No domain information specified as part of list request"
+msgstr "Listeleme isteğinin parçası olarak alan bilgisi belirtilmedi"
+
+#, python-format
+msgid ""
+"Policy %(policy_id)s referenced in association for endpoint %(endpoint_id)s "
+"not found."
+msgstr ""
+"%(endpoint_id)s bitiş noktası için ilişkisi için başvurulan %(policy_id)s "
+"ilkesi bulunamadı."
+
+msgid "RBAC: Bypassing authorization"
+msgstr "RBAC: Yetkilendirme baypas ediliyor"
+
+msgid "RBAC: Invalid token"
+msgstr "RBAC: Geçersiz jeton"
+
+msgid "RBAC: Invalid user data in token"
+msgstr "RBAC: Jetonda geçersiz kullanıcı verisi"
+
+#, python-format
+msgid ""
+"Removing `%s` from revocation list due to invalid expires data in revocation "
+"list."
+msgstr ""
+"feshetme listesindeki geçersiz sona erme tarihi verisi sebebiyle `%s` "
+"feshetme listesinden kaldırılıyor."
+
+#, python-format
+msgid ""
+"TTL index already exists on db collection <%(c_name)s>, remove index <"
+"%(indx_name)s> first to make updated mongo_ttl_seconds value to be effective"
+msgstr ""
+"TTL indisi zaten <%(c_name)s> db koleksiyonunda mevcut, güncellenmiş "
+"mongo_ttl_seconds değerini etkin yapmak için önce <%(indx_name)s> indisini "
+"kaldırın"
+
+#, python-format
+msgid "Token `%s` is expired, not adding to the revocation list."
+msgstr "`%s` jetonunun süresi dolmuş, feshetme listesine eklenmiyor."
+
+#, python-format
+msgid "Truncating user password to %d characters."
+msgstr "Kullanıcı parolası %d karaktere kırpılıyor."
+
+#, python-format
+msgid "Unable to add user %(user)s to %(tenant)s."
+msgstr "Kullanıcı %(user)s %(tenant)s'e eklenemiyor."
+
+#, python-format
+msgid ""
+"Unable to change the ownership of [fernet_tokens] key_repository without a "
+"keystone user ID and keystone group ID both being provided: %s"
+msgstr ""
+"Hem keystone kullanıcı kimliği hem keystone grup kimliği verilmeden "
+"[fernet_tokens] key_repository sahipliği değiştirilemiyor: %s"
+
+#, python-format
+msgid ""
+"Unable to change the ownership of the new key without a keystone user ID and "
+"keystone group ID both being provided: %s"
+msgstr ""
+"Hem keystone kullanıcı kimliği hem keystone grup kimliği verilmeden yeni "
+"anahtarın sahipliği değiştirilemiyor: %s"
+
+#, python-format
+msgid "Unable to locate domain config directory: %s"
+msgstr "Alan yapılandırma dizini bulunamadı: %s"
+
+#, python-format
+msgid "Unable to remove user %(user)s from %(tenant)s."
+msgstr "Kullanıcı %(user)s %(tenant)s'den çıkarılamadı."
+
+#, python-format
+msgid ""
+"Unsupported policy association found - Policy %(policy_id)s, Endpoint "
+"%(endpoint_id)s, Service %(service_id)s, Region %(region_id)s, "
+msgstr ""
+"Desteklenmeyen ilke ilişkilendirmesi bulundu - İlke %(policy_id)s, Bitiş "
+"noktası %(endpoint_id)s, Servis %(service_id)s, Bölge %(region_id)s, "
+
+#, python-format
+msgid ""
+"User %(user_id)s doesn't have access to default project %(project_id)s. The "
+"token will be unscoped rather than scoped to the project."
+msgstr ""
+"%(user_id)s kullanıcısı varsayılan proje %(project_id)s erişimine sahip "
+"değil. Jeton projeye kapsamsız olacak, kapsamlı değil."
+
+#, python-format
+msgid ""
+"User %(user_id)s's default project %(project_id)s is disabled. The token "
+"will be unscoped rather than scoped to the project."
+msgstr ""
+"%(user_id)s kullanıcısının varsayılan projesi %(project_id)s kapalı. Jeton "
+"projeye kapsamsız olacak, kapsamlı değil."
+
+#, python-format
+msgid ""
+"User %(user_id)s's default project %(project_id)s not found. The token will "
+"be unscoped rather than scoped to the project."
+msgstr ""
+"%(user_id)s kullanıcısının varsayılan projesi %(project_id)s bulunamadı. "
+"Jeton projeye kapsamsız olacak, kapsamlı değil."
+
+#, python-format
+msgid ""
+"When deleting entries for %(search_base)s, could not delete nonexistent "
+"entries %(entries)s%(dots)s"
+msgstr ""
+"%(search_base)s için girdiler silinirken, mevcut olmayan girdiler %(entries)s"
+"%(dots)s silinemedi"
+
+#, python-format
+msgid "[fernet_tokens] key_repository is world readable: %s"
+msgstr "[fernet_tokens] key_repository herkesçe okunabilir: %s"
+
+msgid ""
+"[fernet_tokens] max_active_keys must be at least 1 to maintain a primary key."
+msgstr ""
+"[fernet_tokens] max_active_keys bir birincil anahtarı korumak için en az 1 "
+"olmalı."
+
+#, python-format
+msgid ""
+"`token_api.%s` is deprecated as of Juno in favor of utilizing methods on "
+"`token_provider_api` and may be removed in Kilo."
+msgstr ""
+"`token_provider_api` üzerindeki yöntemlerden faydalanmak için `token_api.%s` "
+"Juno'dan sonra tercih edilmeyecek ve Kilo'da kaldırılabilir."
+
+msgid "keystone-manage pki_setup is not recommended for production use."
+msgstr "keystone-manage pki_setup üretimde kullanmak için tavsiye edilmez."
+
+msgid "keystone-manage ssl_setup is not recommended for production use."
+msgstr "keystone-manage ssl_setup üretimde kullanmak için tavsiye edilmez."
+
+msgid "missing exception kwargs (programmer error)"
+msgstr "istisna kwargs eksik (programcı hatası)"
diff --git a/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone.po b/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone.po
new file mode 100644
index 00000000..6b962cfd
--- /dev/null
+++ b/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone.po
@@ -0,0 +1,1288 @@
+# Turkish (Turkey) translations for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+# Alper Çiftçi <alprciftci@gmail.com>, 2015
+# Andreas Jaeger <jaegerandi@gmail.com>, 2015
+# catborise <muhammetalisag@gmail.com>, 2013
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-08-16 06:06+0000\n"
+"PO-Revision-Date: 2015-08-15 18:05+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Turkish (Turkey) (http://www.transifex.com/openstack/keystone/"
+"language/tr_TR/)\n"
+"Plural-Forms: nplurals=1; plural=0\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.0\n"
+
+#, python-format
+msgid "%(detail)s"
+msgstr "%(detail)s"
+
+#, python-format
+msgid ""
+"%(event)s is not a valid notification event, must be one of: %(actions)s"
+msgstr ""
+"%(event)s geçerli bir bilgilendirme olayı değil, şunlardan biri olmalı: "
+"%(actions)s"
+
+#, python-format
+msgid "%(host)s is not a trusted dashboard host"
+msgstr "%(host)s güvenilir bir gösterge paneli istemcisi değil"
+
+#, python-format
+msgid "%(message)s %(amendment)s"
+msgstr "%(message)s %(amendment)s"
+
+#, python-format
+msgid ""
+"%(mod_name)s doesn't provide database migrations. The migration repository "
+"path at %(path)s doesn't exist or isn't a directory."
+msgstr ""
+"%(mod_name)s veri tabanı göçü sağlamıyor. %(path)s yolundaki göç deposu yolu "
+"mevcut değil ya da bir dizin değil."
+
+#, python-format
+msgid "%(property_name)s cannot be less than %(min_length)s characters."
+msgstr "%(property_name)s %(min_length)s karakterden az olamaz."
+
+#, python-format
+msgid "%(property_name)s is not a %(display_expected_type)s"
+msgstr "%(property_name)s bir %(display_expected_type)s değil"
+
+#, python-format
+msgid "%(property_name)s should not be greater than %(max_length)s characters."
+msgstr "%(property_name)s %(max_length)s karakterden büyük olmamalı."
+
+#, python-format
+msgid "%s cannot be empty."
+msgstr "%s boş olamaz."
+
+#, python-format
+msgid "%s extension does not exist."
+msgstr "%s eklentisi mevcut değil."
+
+#, python-format
+msgid "%s field is required and cannot be empty"
+msgstr "%s alanı gerekli ve boş olamaz"
+
+#, python-format
+msgid "%s field(s) cannot be empty"
+msgstr "%s alan(lar)ı boş olamaz"
+
+msgid "(Disable debug mode to suppress these details.)"
+msgstr "(Bu detayları gizlemek için hata ayıklama kipini kapatın.)"
+
+msgid "--all option cannot be mixed with other options"
+msgstr "--all seçeneği diğer seçeneklerle birleştirilemez"
+
+msgid "A project-scoped token is required to produce a service catalog."
+msgstr "Servis kataloğu oluşturmak için proje-kapsamlı bir jeton gerekli."
+
+msgid "Access token is expired"
+msgstr "Erişim jetonunun süresi dolmuş"
+
+msgid "Access token not found"
+msgstr "Erişim jetonu bulunamadı"
+
+msgid "Additional authentications steps required."
+msgstr "Ek kimlik doğrulama adımları gerekli."
+
+msgid "An unexpected error occurred when retrieving domain configs"
+msgstr "Alan yapılandırmaları alınırken beklenmedik hata oluştu"
+
+#, python-format
+msgid "An unexpected error occurred when trying to store %s"
+msgstr "%s depolanırken beklenmedik bir hata oluştu"
+
+msgid "An unexpected error prevented the server from fulfilling your request."
+msgstr "Beklenmedik bir hata sunucunun isteğinizi tamamlamasını engelledi."
+
+#, python-format
+msgid ""
+"An unexpected error prevented the server from fulfilling your request: "
+"%(exception)s"
+msgstr ""
+"Beklenmedik bir hata sunucunun isteğinizi tamamlamasını engelledi: "
+"%(exception)s"
+
+msgid "An unhandled exception has occurred: Could not find metadata."
+msgstr "Ele alınmayan istisna oluştu: Metadata bulunamadı."
+
+msgid "At least one option must be provided"
+msgstr "En az bir seçenek sağlanmalıdır"
+
+msgid "At least one option must be provided, use either --all or --domain-name"
+msgstr "En az bir seçenek sağlanmalıdır, ya --all ya da --domain-name kullanın"
+
+msgid "At least one role should be specified."
+msgstr "En az bir kural belirtilmeli."
+
+msgid "Attempted to authenticate with an unsupported method."
+msgstr "Desteklenmeyen yöntem ile doğrulama girişiminde bulunuldu."
+
+msgid ""
+"Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 "
+"Authentication"
+msgstr ""
+"OS-FEDERATION jetonu V2 Kimlik Servisi ile kullanılmaya çalışılıyor, V3 "
+"Kimlik Doğrulama kullanın"
+
+msgid "Authentication plugin error."
+msgstr "Kimlik doğrulama eklenti hatası."
+
+#, python-format
+msgid ""
+"Backend `%(backend)s` is not a valid memcached backend. Valid backends: "
+"%(backend_list)s"
+msgstr ""
+"Arka uç `%(backend)s` geçerli bir memcached arka ucu değil. Geçerli arka "
+"uçlar: %(backend_list)s"
+
+msgid "Cannot authorize a request token with a token issued via delegation."
+msgstr "Vekil ile sağlanan bir jeton ile istek yetkilendirilemez."
+
+#, python-format
+msgid "Cannot change %(option_name)s %(attr)s"
+msgstr "%(option_name)s %(attr)s değiştirilemiyor"
+
+msgid "Cannot change Domain ID"
+msgstr "Alan ID'si değiştirilemez"
+
+msgid "Cannot change consumer secret"
+msgstr "Tüketici sırrı değiştirilemez"
+
+msgid "Cannot change user ID"
+msgstr "Kullanıcı ID'si değiştirilemiyor"
+
+msgid "Cannot change user name"
+msgstr "Kullanıcı adı değiştirilemiyor"
+
+#, python-format
+msgid "Cannot create an endpoint with an invalid URL: %(url)s"
+msgstr "%(url)s geçersiz URL' si ile bir bitiş noktası yaratılamıyor"
+
+#, python-format
+msgid "Cannot create project with parent: %(project_id)s"
+msgstr "Üst proje %(project_id)s ye sahip proje oluşturulamıyor"
+
+#, python-format
+msgid "Cannot duplicate name %s"
+msgstr "%s ismi kopyalanamaz"
+
+msgid "Cannot list request tokens with a token issued via delegation."
+msgstr "Vekalet ile sağlanan bir jeton ile istek jetonları listelenemez."
+
+#, python-format
+msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s"
+msgstr "Sertifika %(cert_file)s açılamıyor. Sebep: %(reason)s"
+
+#, python-format
+msgid "Cannot remove role that has not been granted, %s"
+msgstr "Verilmemiş rol silinemez, %s"
+
+msgid ""
+"Cannot truncate a driver call without hints list as first parameter after "
+"self "
+msgstr ""
+"self'den sonra ilk parametre olarak ipucu listesi verilmeden bir sürücü "
+"çağrısı kırpılamıyor "
+
+msgid ""
+"Cannot use parents_as_list and parents_as_ids query params at the same time."
+msgstr ""
+"parents_as_list ve parents_as_ids sorgu parametreleri aynı anda kullanılamaz."
+
+msgid ""
+"Cannot use subtree_as_list and subtree_as_ids query params at the same time."
+msgstr ""
+"subtree_as_list ve subtree_as_ids sorgu parametreleri aynı anda kullanılamaz."
+
+msgid ""
+"Combining effective and group filter will always result in an empty list."
+msgstr ""
+"Efektif ve grup filtresini birleştirmek her zaman boş bir listeye yol açar."
+
+msgid ""
+"Combining effective, domain and inherited filters will always result in an "
+"empty list."
+msgstr ""
+"Efektif, alan ve miras filtrelerin birleştirilmesi her zaman boş bir listeye "
+"yol açar."
+
+#, python-format
+msgid "Conflict occurred attempting to store %(type)s - %(details)s"
+msgstr "%(type)s depolanırken çatışma oluştu- %(details)s"
+
+#, python-format
+msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\""
+msgstr "Çatışan bölge kimlikleri belirtildi: \"%(url_id)s\" != \"%(ref_id)s\""
+
+msgid "Consumer not found"
+msgstr "Tüketici bulunamadı"
+
+#, python-format
+msgid ""
+"Could not change immutable attribute(s) '%(attributes)s' in target %(target)s"
+msgstr ""
+"%(target)s hedefindeki değişmez öznitelik(ler) '%(attributes)s' "
+"değiştirilemiyor"
+
+#, python-format
+msgid ""
+"Could not find %(group_or_option)s in domain configuration for domain "
+"%(domain_id)s"
+msgstr ""
+"%(domain_id)s alanı için alan yapılandırmasında %(group_or_option)s "
+"bulunamadı"
+
+#, python-format
+msgid "Could not find Endpoint Group: %(endpoint_group_id)s"
+msgstr "Bitiş noktası Grubu bulunamadı: %(endpoint_group_id)s"
+
+msgid "Could not find Identity Provider identifier in environment"
+msgstr "Kimlik Sağlayıcı tanımlayıcısı ortamda bulunamıyor"
+
+#, python-format
+msgid "Could not find Identity Provider: %(idp_id)s"
+msgstr "Kimlik Sağlayıcı bulunamadı: %(idp_id)s"
+
+#, python-format
+msgid "Could not find Service Provider: %(sp_id)s"
+msgstr "Servis Sağlayıcı bulunamadı: %(sp_id)s"
+
+#, python-format
+msgid "Could not find credential: %(credential_id)s"
+msgstr "Kimlik bilgisi bulunamadı: %(credential_id)s"
+
+#, python-format
+msgid "Could not find domain: %(domain_id)s"
+msgstr "Alan bulunamadı: %(domain_id)s"
+
+#, python-format
+msgid "Could not find endpoint: %(endpoint_id)s"
+msgstr "Bitiş noktası bulunamadı: %(endpoint_id)s"
+
+#, python-format
+msgid ""
+"Could not find federated protocol %(protocol_id)s for Identity Provider: "
+"%(idp_id)s"
+msgstr ""
+"Kimlik Sağlayıcı: %(idp_id)s için birleşmiş iletişim kuralı %(protocol_id)s "
+"bulunamadı"
+
+#, python-format
+msgid "Could not find group: %(group_id)s"
+msgstr "Grup bulunamadı: %(group_id)s"
+
+#, python-format
+msgid "Could not find mapping: %(mapping_id)s"
+msgstr "Eşleştirme bulunamadı: %(mapping_id)s"
+
+msgid "Could not find policy association"
+msgstr "İlke ilişkilendirme bulunamadı"
+
+#, python-format
+msgid "Could not find policy: %(policy_id)s"
+msgstr "İlke bulunamadı: %(policy_id)s"
+
+#, python-format
+msgid "Could not find project: %(project_id)s"
+msgstr "Proje bulunamadı: %(project_id)s"
+
+#, python-format
+msgid "Could not find region: %(region_id)s"
+msgstr "Bölge bulunamadı: %(region_id)s"
+
+msgid "Could not find role"
+msgstr "Rol bulunamadı"
+
+#, python-format
+msgid ""
+"Could not find role assignment with role: %(role_id)s, user or group: "
+"%(actor_id)s, project or domain: %(target_id)s"
+msgstr ""
+"Rol: %(role_id)s, kullanıcı veya grup: %(actor_id)s, proje veya alan: "
+"%(target_id)s ile rol ataması bulunamadı"
+
+#, python-format
+msgid "Could not find role: %(role_id)s"
+msgstr "Rol bulunamadı: %(role_id)s"
+
+#, python-format
+msgid "Could not find service: %(service_id)s"
+msgstr "Servis bulunamadı: %(service_id)s"
+
+#, python-format
+msgid "Could not find token: %(token_id)s"
+msgstr "Jeton bulunamadı: %(token_id)s"
+
+#, python-format
+msgid "Could not find trust: %(trust_id)s"
+msgstr "Güven bulunamadı: %(trust_id)s"
+
+#, python-format
+msgid "Could not find user: %(user_id)s"
+msgstr "Kullanıcı bulunamadı: %(user_id)s"
+
+#, python-format
+msgid "Could not find version: %(version)s"
+msgstr "Sürüm bulunamadı: %(version)s"
+
+#, python-format
+msgid "Could not find: %(target)s"
+msgstr "Bulunamadı: %(target)s"
+
+msgid "Could not validate the access token"
+msgstr "Erişim jetonu doğrulanamadı"
+
+msgid "Credential belongs to another user"
+msgstr "Kimlik bilgisi başka bir kullanıcıya ait"
+
+#, python-format
+msgid "Database at /domains/%s/config"
+msgstr "/domains/%s/config konumundaki veri tabanı"
+
+msgid ""
+"Disabling an entity where the 'enable' attribute is ignored by configuration."
+msgstr ""
+"'enable' özniteliği yapılandırma tarafından göz ardı edilen bir öğe "
+"kapatılıyor."
+
+#, python-format
+msgid "Domain (%s)"
+msgstr "Alan (%s)"
+
+#, python-format
+msgid "Domain cannot be named %s"
+msgstr "Alan %s olarak adlandırılamaz"
+
+#, python-format
+msgid "Domain cannot have ID %s"
+msgstr "Alan %s ID'sine sahip olamaz"
+
+#, python-format
+msgid "Domain is disabled: %s"
+msgstr "Alan kapalı: %s"
+
+msgid "Domain metadata not supported by LDAP"
+msgstr "Alan metadata'sı LDAP tarafından desteklenmiyor"
+
+msgid "Domain scoped token is not supported"
+msgstr "Alan kapsamlı jeton desteklenmiyor"
+
+#, python-format
+msgid ""
+"Domain specific sql drivers are not supported via the Identity API. One is "
+"specified in /domains/%s/config"
+msgstr ""
+"Alana özel sql sürücüleri Kimlik API'si tarafından desteklenmiyor. Birisi /"
+"domains/%s/config içinde tanımlanmış"
+
+#, python-format
+msgid ""
+"Domain: %(domain)s already has a configuration defined - ignoring file: "
+"%(file)s."
+msgstr ""
+"Alan: %(domain)s zaten tanımlanmış bir yapılandırmaya sahip - dosya "
+"atlanıyor: %(file)s."
+
+msgid "Domains are not supported by the v2 API. Please use the v3 API instead."
+msgstr "v2 API alanları desteklemiyor. Bunun yerine lütfen v3 API kullanın"
+
+msgid "Domains are read-only against LDAP"
+msgstr "Alanlar LDAP'a karşı yalnızca-okunur"
+
+msgid "Duplicate Entry"
+msgstr "Kopya Girdi"
+
+#, python-format
+msgid "Duplicate ID, %s."
+msgstr "Kopya ID, %s"
+
+#, python-format
+msgid "Duplicate name, %s."
+msgstr "Kopya isim, %s."
+
+msgid "Enabled field must be a boolean"
+msgstr "Etkin alan bool olmalı"
+
+msgid "Enabled field should be a boolean"
+msgstr "Etkin alan bool olmalı"
+
+#, python-format
+msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s"
+msgstr "Bitiş noktası %(endpoint_id)s %(project_id)s projesinde bulunamadı"
+
+msgid "Endpoint Group Project Association not found"
+msgstr "Bitiş Noktası Grup Proje İlişkisi bulunamadı"
+
+msgid "Ensure configuration option idp_entity_id is set."
+msgstr "idp_entity_id yapılandırma seçeneğinin ayarlandığına emin olun."
+
+msgid "Ensure configuration option idp_sso_endpoint is set."
+msgstr "idp_sso_endpoint yapılandırma seçeneğinin ayarlandığına emin olun."
+
+#, python-format
+msgid ""
+"Error parsing configuration file for domain: %(domain)s, file: %(file)s."
+msgstr ""
+"Alan: %(domain)s için yapılandırma dosyası ayrıştırılırken hata, dosya: "
+"%(file)s."
+
+#, python-format
+msgid "Error while opening file %(path)s: %(err)s"
+msgstr "Dosya açılırken hata %(path)s: %(err)s"
+
+#, python-format
+msgid "Error while parsing line: '%(line)s': %(err)s"
+msgstr "Satır ayrıştırılırken hata: '%(line)s': %(err)s"
+
+#, python-format
+msgid "Error while parsing rules %(path)s: %(err)s"
+msgstr "Kurallar ayrıştırılırken hata %(path)s: %(err)s"
+
+#, python-format
+msgid "Error while reading metadata file, %(reason)s"
+msgstr "Metadata dosyası okunurken hata, %(reason)s"
+
+#, python-format
+msgid "Expected dict or list: %s"
+msgstr "Sözlük ya da liste beklendi: %s"
+
+msgid ""
+"Expected signing certificates are not available on the server. Please check "
+"Keystone configuration."
+msgstr ""
+"Beklenen imzalama sertifikaları sunucuda kullanılabilir değil. Lütfen "
+"Keystone yapılandırmasını kontrol edin."
+
+#, python-format
+msgid ""
+"Expecting to find %(attribute)s in %(target)s - the server could not comply "
+"with the request since it is either malformed or otherwise incorrect. The "
+"client is assumed to be in error."
+msgstr ""
+"%(target)s içinde %(attribute)s bulunması bekleniyordu - sunucu talebi "
+"yerine getiremedi çünkü ya istek kusurluydu ya da geçersizdi. İstemcinin "
+"hatalı olduğu varsayılıyor."
+
+#, python-format
+msgid "Failed to start the %(name)s server"
+msgstr "%(name)s sunucusu başlatılamadı"
+
+msgid "Failed to validate token"
+msgstr "Jeton doğrulama başarısız"
+
+msgid "Federation token is expired"
+msgstr "Federasyon jetonunun süresi dolmuş"
+
+#, python-format
+msgid ""
+"Field \"remaining_uses\" is set to %(value)s while it must not be set in "
+"order to redelegate a trust"
+msgstr ""
+"\"remaining_uses\" alanı %(value)s olarak ayarlanmış, bir güvene tekrar "
+"yetki vermek için böyle ayarlanmamalı"
+
+msgid "Found invalid token: scoped to both project and domain."
+msgstr "Geçersiz jeton bulundu: hem proje hem alana kapsanmış."
+
+#, python-format
+msgid "Group %(group)s is not supported for domain specific configurations"
+msgstr "%(group)s grubu alana özel yapılandırmalar için desteklenmiyor"
+
+#, python-format
+msgid ""
+"Group %(group_id)s returned by mapping %(mapping_id)s was not found in the "
+"backend."
+msgstr ""
+"%(mapping_id)s eşleştirmesi tarafından döndürülen %(group_id)s grubu arka "
+"uçta bulunamadı."
+
+#, python-format
+msgid ""
+"Group membership across backend boundaries is not allowed, group in question "
+"is %(group_id)s, user is %(user_id)s"
+msgstr ""
+"Arka uç sınırları arasında grup üyeliğine izin verilmez, sorudaki grup "
+"%(group_id)s, kullanıcı ise %(user_id)s"
+
+#, python-format
+msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s"
+msgstr "ID özniteliği %(id_attr)s %(dn)s LDAP nesnesinde bulunamadı"
+
+#, python-format
+msgid "Identity Provider %(idp)s is disabled"
+msgstr "Kimlik Sağlayıcı %(idp)s kapalı"
+
+msgid ""
+"Incoming identity provider identifier not included among the accepted "
+"identifiers."
+msgstr ""
+"Gelen kimlik sağlayıcı tanımlayıcısı kabul edilen tanımlayıcılar arasında "
+"yok."
+
+#, python-format
+msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
+msgstr ""
+"Geçersiz LDAP TLS sertifika seçeneği: %(option)s. Şunlardan birini seçin: "
+"%(options)s"
+
+#, python-format
+msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available"
+msgstr "Geçersiz LDAP TLS_AVAIL seçeneği: %s. TLS kullanılabilir değil"
+
+#, python-format
+msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s"
+msgstr ""
+"Geçersiz LDAP referans kaldırma seçeneği: %(option)s. Şunlardan birini "
+"seçin: %(options)s"
+
+#, python-format
+msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s"
+msgstr "Geçersiz LDAP kapsamı: %(scope)s. Şunlardan birini seçin: %(options)s"
+
+msgid "Invalid TLS / LDAPS combination"
+msgstr "Geçersiz TLS / LDAPS kombinasyonu"
+
+#, python-format
+msgid "Invalid audit info data type: %(data)s (%(type)s)"
+msgstr "Geçersiz denetim bilgisi veri türü: %(data)s (%(type)s)"
+
+msgid "Invalid blob in credential"
+msgstr "Kimlik bilgisinde geçersiz düğüm"
+
+#, python-format
+msgid ""
+"Invalid domain name: %(domain)s found in config file name: %(file)s - "
+"ignoring this file."
+msgstr ""
+"Yapılandırma dosyası isminde: %(file)s geçersiz alan adı: %(domain)s bulundu "
+"- bu dosya atlanıyor."
+
+#, python-format
+msgid "Invalid domain specific configuration: %(reason)s"
+msgstr "Geçersiz alana özel yapılandırma: %(reason)s"
+
+#, python-format
+msgid "Invalid input for field '%(path)s'. The value is '%(value)s'."
+msgstr "'%(path)s' alanı için geçersiz girdi. Değer '%(value)s'."
+
+msgid "Invalid limit value"
+msgstr "Geçersiz sınır değeri"
+
+#, python-format
+msgid ""
+"Invalid mix of entities for policy association - only Endpoint, Service or "
+"Region+Service allowed. Request was - Endpoint: %(endpoint_id)s, Service: "
+"%(service_id)s, Region: %(region_id)s"
+msgstr ""
+"İlke ilişkilendirmeleri için geçersiz öğe karışımı - yalnızca Bitişnoktası, "
+"Servis veya Bölge+Servise izin verilir. İstek şuydu Bitişnoktası: "
+"%(endpoint_id)s, Servis: %(service_id)s, Bölge: %(region_id)s"
+
+#, python-format
+msgid ""
+"Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must "
+"be specified."
+msgstr ""
+"Geçersiz kural: %(identity_value)s. Hem 'gruplar' hem 'alan' anahtar "
+"kelimeleri belirtilmeli."
+
+msgid "Invalid signature"
+msgstr "Geçersiz imza"
+
+#, python-format
+msgid ""
+"Invalid ssl_cert_reqs value of %s, must be one of \"NONE\", \"OPTIONAL\", "
+"\"REQUIRED\""
+msgstr ""
+"%s değerinde geçersiz ssl_cert_reqs, \"NONE\", \"OPTIONAL\", \"REQUIRED"
+"\" den biri olmalı"
+
+msgid "Invalid user / password"
+msgstr "Geçersiz kullanıcı / parola"
+
+msgid "Invalid username or password"
+msgstr "Geçersiz kullanıcı adı veya parola"
+
+#, python-format
+msgid "KVS region %s is already configured. Cannot reconfigure."
+msgstr "KVS bölgesi %s zaten yapılandırılmış. Yeniden yapılandırılamıyor."
+
+#, python-format
+msgid "Key Value Store not configured: %s"
+msgstr "Anahtar Değer Deposu yapılandırılmamış: %s"
+
+#, python-format
+msgid "LDAP %s create"
+msgstr "LDAP %s oluştur"
+
+#, python-format
+msgid "LDAP %s delete"
+msgstr "LDAP %s sil"
+
+#, python-format
+msgid "LDAP %s update"
+msgstr "LDAP %s güncelle"
+
+#, python-format
+msgid "Lock Timeout occurred for key, %(target)s"
+msgstr "Anahtar için Kilit Zaman Aşımı oluştu, %(target)s"
+
+#, python-format
+msgid "Lock key must match target key: %(lock)s != %(target)s"
+msgstr "Kilit anahtarı hedef anahtarla eşleşmeli: %(lock)s != %(target)s"
+
+#, python-format
+msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details."
+msgstr ""
+"Kusurlu bitiş noktası URL'si (%(endpoint)s), detaylar için HATA kaydına "
+"bakın."
+
+msgid "Marker could not be found"
+msgstr "İşaretçi bulunamadı"
+
+#, python-format
+msgid "Maximum lock attempts on %s occurred."
+msgstr "%s üzerinde azami kilit girişimi yapıldı."
+
+#, python-format
+msgid "Member %(member)s is already a member of group %(group)s"
+msgstr "Üye %(member)s zaten %(group)s grubunun üyesi"
+
+#, python-format
+msgid "Method not callable: %s"
+msgstr "Metod çağrılabilir değil: %s"
+
+msgid "Missing entity ID from environment"
+msgstr "Öğe kimliği ortamdan eksik"
+
+msgid ""
+"Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting "
+"this parameter is advised."
+msgstr ""
+"Tekrar yetkilendirme üzerine \"redelegation_count\" değiştirmeye izin "
+"verilmez. Bu parametrenin atlanması tavsiye edilir."
+
+msgid "Multiple domains are not supported"
+msgstr "Birden çok alan desteklenmiyor"
+
+msgid "Must be called within an active lock context."
+msgstr "Etkin kilit içeriği içinde çağrılmalı."
+
+msgid "Must specify either domain or project"
+msgstr "Alan ya da projeden biri belirtilmelidir"
+
+msgid "Name field is required and cannot be empty"
+msgstr "İsim alanı gerekli ve boş olamaz"
+
+msgid ""
+"No Authorization headers found, cannot proceed with OAuth related calls, if "
+"running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On."
+msgstr ""
+"Yetkilendirme başlıkları bulunamadı, OAuth ile ilişkili çağrılarla devam "
+"edilemez, HTTPd veya Apache altında çalışıyorsanız, WSGIPassAuthorization "
+"ayarını açtığınızdan emin olun."
+
+msgid "No authenticated user"
+msgstr "Kimlik denetimi yapılmamış kullanıcı"
+
+msgid ""
+"No encryption keys found; run keystone-manage fernet_setup to bootstrap one."
+msgstr ""
+"Şifreleme anahtarları bulunamadı; birini yükletmek için keystone-manage "
+"fernet_setup çalıştırın."
+
+msgid "No options specified"
+msgstr "Hiçbir seçenek belirtilmedi"
+
+#, python-format
+msgid "No policy is associated with endpoint %(endpoint_id)s."
+msgstr "Hiçbir ilke %(endpoint_id)s bitiş noktasıyla ilişkilendirilmemiş."
+
+#, python-format
+msgid "No remaining uses for trust: %(trust_id)s"
+msgstr "Güven için kalan kullanım alanı yok: %(trust_id)s"
+
+msgid "Non-default domain is not supported"
+msgstr "Varsayılan olmayan alan desteklenmiyor"
+
+msgid "One of the trust agents is disabled or deleted"
+msgstr "Güven ajanlarından biri kapalı ya da silinmiş"
+
+#, python-format
+msgid ""
+"Option %(option)s found with no group specified while checking domain "
+"configuration request"
+msgstr ""
+"%(option)s seçeneği alan yapılandırma isteği kontrol edilirken hiçbir grup "
+"belirtilmemiş şekilde bulundu"
+
+#, python-format
+msgid ""
+"Option %(option)s in group %(group)s is not supported for domain specific "
+"configurations"
+msgstr ""
+"%(group)s grubundaki %(option)s seçeneği alana özel yapılandırmalarda "
+"desteklenmiyor"
+
+#, python-format
+msgid "Project (%s)"
+msgstr "Proje (%s)"
+
+#, python-format
+msgid "Project ID not found: %(t_id)s"
+msgstr "Proje kimliği bulunamadı: %(t_id)s"
+
+msgid "Project field is required and cannot be empty."
+msgstr "Proje alanı gerekli ve boş olamaz."
+
+#, python-format
+msgid "Project is disabled: %s"
+msgstr "Proje kapalı: %s"
+
+msgid "Redelegation allowed for delegated by trust only"
+msgstr ""
+"Tekrar yetki vermeye yalnızca güven tarafından yetki verilenler için izin "
+"verilir"
+
+#, python-format
+msgid ""
+"Remaining redelegation depth of %(redelegation_depth)d out of allowed range "
+"of [0..%(max_count)d]"
+msgstr ""
+"izin verilen [0..%(max_count)d] aralığı içinden %(redelegation_depth)d izin "
+"verilen tekrar yetki verme derinliği"
+
+msgid "Request Token does not have an authorizing user id"
+msgstr "İstek Jetonu yetki veren bir kullanıcı id'sine sahip değil"
+
+#, python-format
+msgid ""
+"Request attribute %(attribute)s must be less than or equal to %(size)i. The "
+"server could not comply with the request because the attribute size is "
+"invalid (too large). The client is assumed to be in error."
+msgstr ""
+"İstek özniteliği %(attribute)s %(size)i boyutuna eşit ya da daha küçük "
+"olmalı. Sunucu talebi yerine getiremedi çünkü öznitelik boyutu geçersiz (çok "
+"büyük). İstemcinin hata durumunda olduğu varsayılıyor."
+
+msgid "Request must have an origin query parameter"
+msgstr "İstek bir başlangıç noktası sorgu parametresine sahip olmalı"
+
+msgid "Request token is expired"
+msgstr "İstek jetonunun süresi dolmuş"
+
+msgid "Request token not found"
+msgstr "İstek jetonu bulunamadı"
+
+msgid "Requested expiration time is more than redelegated trust can provide"
+msgstr ""
+"İstenen zaman bitim süresi tekrar yetkilendirilen güvenin "
+"sağlayabileceğinden fazla"
+
+#, python-format
+msgid ""
+"Requested redelegation depth of %(requested_count)d is greater than allowed "
+"%(max_count)d"
+msgstr ""
+"%(requested_count)d istenen tekrar yetki verme derinliği izin verilen "
+"%(max_count)d den fazla"
+
+#, python-format
+msgid "Role %s not found"
+msgstr "%s rolü bulunamadı"
+
+msgid ""
+"Running keystone via eventlet is deprecated as of Kilo in favor of running "
+"in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will "
+"be removed in the \"M\"-Release."
+msgstr ""
+"Bir WSGI sunucuda (örn. mod_wsgi) çalıştırmak adına, keystone'nin eventlet "
+"ile çalıştırılması Kilo'dan sonra desteklenmiyor. Eventlet altında keystone "
+"desteği \"M\"-Sürümünde kaldırılacak."
+
+msgid "Scoping to both domain and project is not allowed"
+msgstr "Hem alan hem projeye kapsamlamaya izin verilmez"
+
+msgid "Scoping to both domain and trust is not allowed"
+msgstr "Hem alan hem güvene kapsamlamaya izin verilmez"
+
+msgid "Scoping to both project and trust is not allowed"
+msgstr "Hem proje hem güvene kapsamlamaya izin verilmez"
+
+#, python-format
+msgid "Service Provider %(sp)s is disabled"
+msgstr "Servis Sağlayıcı %(sp)s kapalı"
+
+msgid "Some of requested roles are not in redelegated trust"
+msgstr "İstenen rollerin bazıları tekrar yetki verilen güven içinde değil"
+
+msgid "Specify a domain or project, not both"
+msgstr "Bir alan ya da proje belirtin, ikisini birden değil"
+
+msgid "Specify a user or group, not both"
+msgstr "Bir kullanıcı ya da grup belirtin, ikisini birden değil"
+
+msgid "Specify one of domain or project"
+msgstr "Alandan ya da projeden birini belirtin"
+
+msgid "Specify one of user or group"
+msgstr "Kullanıcı ya da grup belirtin"
+
+#, python-format
+msgid ""
+"String length exceeded.The length of string '%(string)s' exceeded the limit "
+"of column %(type)s(CHAR(%(length)d))."
+msgstr ""
+"Karakter dizisi uzunluğu aşıldı. '%(string)s' karakter dizisinin uzunluğu "
+"%(type)s(CHAR(%(length)d)) sütunu sınırını aşıyor."
+
+msgid ""
+"The 'expires_at' must not be before now. The server could not comply with "
+"the request since it is either malformed or otherwise incorrect. The client "
+"is assumed to be in error."
+msgstr ""
+"'expires_at' şu andan önce olmamalı. Sunucu talebi yerine getiremedi çünkü "
+"istek ya kusurlu ya da geçersiz. İstemcinin hata durumunda olduğu "
+"varsayılıyor."
+
+msgid "The --all option cannot be used with the --domain-name option"
+msgstr "--all seçeneği --domain-name seçeneğiyle kullanılamaz"
+
+#, python-format
+msgid "The Keystone configuration file %(config_file)s could not be found."
+msgstr "Keystone yapılandırma dosyası %(config_file)s bulunamadı."
+
+#, python-format
+msgid ""
+"The Keystone domain-specific configuration has specified more than one SQL "
+"driver (only one is permitted): %(source)s."
+msgstr ""
+"Keystone alana özel yapılandırması birden fazla SQL sürücüsü belirtti "
+"(yalnızca birine izin verilir): %(source)s."
+
+msgid "The action you have requested has not been implemented."
+msgstr "İstediğiniz eylem uygulanmamış."
+
+msgid "The authenticated user should match the trustor."
+msgstr "Yetkilendirilen kullanıcı güven verenle eşleşmeli."
+
+msgid ""
+"The certificates you requested are not available. It is likely that this "
+"server does not use PKI tokens otherwise this is the result of "
+"misconfiguration."
+msgstr ""
+"İstediğiniz sertifikalar kullanılabilir değil. Bu sunucu muhtemelen PKI "
+"jetonlarını kullanmıyor ya da bu bir yanlış yapılandırmanın sonucu."
+
+#, python-format
+msgid ""
+"The password length must be less than or equal to %(size)i. The server could "
+"not comply with the request because the password is invalid."
+msgstr ""
+"Parola uzunluğu %(size)i ye eşit ya da daha küçük olmalı. Sunucu talebe "
+"cevap veremedi çünkü parola geçersiz."
+
+msgid "The request you have made requires authentication."
+msgstr "Yaptığınız istek kimlik doğrulama gerektiriyor."
+
+msgid "The resource could not be found."
+msgstr "Kaynak bulunamadı."
+
+msgid ""
+"The revoke call must not have both domain_id and project_id. This is a bug "
+"in the Keystone server. The current request is aborted."
+msgstr ""
+"İptal etme çağrısı hem domain_id hem project_id'ye sahip olmamalı. Bu "
+"Keystone sunucudaki bir hata. Mevcut istek iptal edildi."
+
+msgid "The service you have requested is no longer available on this server."
+msgstr "İstediğiniz servis artık bu sunucu üzerinde kullanılabilir değil."
+
+#, python-format
+msgid ""
+"The specified parent region %(parent_region_id)s would create a circular "
+"region hierarchy."
+msgstr ""
+"Belirtilen üst bölge %(parent_region_id)s dairesel bölge sıralı dizisi "
+"oluştururdu."
+
+#, python-format
+msgid ""
+"The value of group %(group)s specified in the config should be a dictionary "
+"of options"
+msgstr ""
+"Yapılandırmada belirtilen %(group)s grubunun değeri seçenekler sözlüğü olmalı"
+
+msgid "There should not be any non-oauth parameters"
+msgstr "Herhangi bir non-oauth parametresi olmamalı"
+
+#, python-format
+msgid "This is not a recognized Fernet payload version: %s"
+msgstr "Bu bilinen bir Fernet faydalı yük sürümü değil: %s"
+
+msgid ""
+"This is not a v2.0 Fernet token. Use v3 for trust, domain, or federated "
+"tokens."
+msgstr ""
+"Bu v2.0 Fernet jetonu değil. Güven, alan, veya federasyon jetonları için v3 "
+"kullanın."
+
+msgid ""
+"Timestamp not in expected format. The server could not comply with the "
+"request since it is either malformed or otherwise incorrect. The client is "
+"assumed to be in error."
+msgstr ""
+"Zaman damgası beklenen biçimde değil. Sunucu talebi yerine getiremedi çünkü "
+"istek ya kusurlu ya da geçersiz. İstemcinin hata durumunda olduğu "
+"varsayılıyor."
+
+#, python-format
+msgid ""
+"To get a more detailed information on this error, re-run this command for "
+"the specific domain, i.e.: keystone-manage domain_config_upload --domain-"
+"name %s"
+msgstr ""
+"Bu hatayla ilgili daha detaylı bilgi almak için, bu komutu belirtilen alan "
+"için tekrar çalıştırın, örn.: keystone-manage domain_config_upload --domain-"
+"name %s"
+
+msgid "Token belongs to another user"
+msgstr "Jeton başka bir kullanıcıya ait"
+
+msgid "Token does not belong to specified tenant."
+msgstr "Jeton belirtilen kiracıya ait değil."
+
+msgid "Trustee has no delegated roles."
+msgstr "Yedieminin emanet edilen rolleri yok."
+
+msgid "Trustor is disabled."
+msgstr "Güven kurucu kapalı."
+
+#, python-format
+msgid ""
+"Trying to update group %(group)s, so that, and only that, group must be "
+"specified in the config"
+msgstr ""
+"%(group)s grubu güncellenmeye çalışılıyor, böylece yapılandırmada yalnızca "
+"grup belirtilmeli"
+
+#, python-format
+msgid ""
+"Trying to update option %(option)s in group %(group)s, but config provided "
+"contains option %(option_other)s instead"
+msgstr ""
+"%(group)s grubundaki %(option)s seçeneği güncellenmeye çalışılıyor, ama "
+"sağlanan yapılandırma %(option_other)s seçeneğini içeriyor"
+
+#, python-format
+msgid ""
+"Trying to update option %(option)s in group %(group)s, so that, and only "
+"that, option must be specified in the config"
+msgstr ""
+"%(group)s grubundaki %(option)s seçeneği güncellenmeye çalışıldı, böylece, "
+"yapılandırmada yalnızca bu seçenek belirtilmeli"
+
+msgid ""
+"Unable to access the keystone database, please check it is configured "
+"correctly."
+msgstr ""
+"Keystone veri tabanına erişilemiyor, lütfen doğru yapılandırıldığından emin "
+"olun."
+
+#, python-format
+msgid "Unable to consume trust %(trust_id)s, unable to acquire lock."
+msgstr "%(trust_id)s güveni tüketilemedi, kilit elde edilemiyor."
+
+#, python-format
+msgid ""
+"Unable to delete region %(region_id)s because it or its child regions have "
+"associated endpoints."
+msgstr ""
+"Bölge %(region_id)s silinemedi çünkü kendisi ya da alt bölgelerinin "
+"ilişkilendirilmiş bitiş noktaları var."
+
+#, python-format
+msgid "Unable to find valid groups while using mapping %(mapping_id)s"
+msgstr "Eşleştirme %(mapping_id)s kullanırken geçerli gruplar bulunamadı"
+
+#, python-format
+msgid ""
+"Unable to get a connection from pool id %(id)s after %(seconds)s seconds."
+msgstr "%(seconds)s saniye sonra havuz %(id)s'den bağlantı alınamadı."
+
+#, python-format
+msgid "Unable to locate domain config directory: %s"
+msgstr "Alan yapılandırma dizini bulunamıyor: %s"
+
+#, python-format
+msgid "Unable to lookup user %s"
+msgstr "%s kullanıcısı aranamadı"
+
+#, python-format
+msgid ""
+"Unable to reconcile identity attribute %(attribute)s as it has conflicting "
+"values %(new)s and %(old)s"
+msgstr ""
+"Kimlik özniteliği %(attribute)s bağdaştırılamıyor çünkü çatışan değerleri "
+"var %(new)s ve %(old)s"
+
+#, python-format
+msgid ""
+"Unable to sign SAML assertion. It is likely that this server does not have "
+"xmlsec1 installed, or this is the result of misconfiguration. Reason "
+"%(reason)s"
+msgstr ""
+"SAML ifadesi imzalanamıyor. Muhtemelen bu sunucuda xmlsec1 kurulu değil, "
+"veya bu bir yanlış yapılandırmanın sonucu. Sebep %(reason)s"
+
+msgid "Unable to sign token."
+msgstr "Jeton imzalanamıyor."
+
+#, python-format
+msgid "Unexpected assignment type encountered, %s"
+msgstr "Beklenmedik atama türüyle karşılaşıldı, %s"
+
+#, python-format
+msgid ""
+"Unexpected combination of grant attributes - User: %(user_id)s, Group: "
+"%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s"
+msgstr ""
+"İzin özniteliklerinin beklenmedik katışımı - Kullanıcı: %(user_id)s, Grup: "
+"%(group_id)s, Proje: %(project_id)s, Alan: %(domain_id)s"
+
+#, python-format
+msgid "Unexpected status requested for JSON Home response, %s"
+msgstr "JSON Home yanıtı için beklenmedik durum istendi, %s"
+
+msgid "Unknown Target"
+msgstr "Bilinmeyen Hedef"
+
+#, python-format
+msgid "Unknown domain '%(name)s' specified by --domain-name"
+msgstr "--domain-name ile bilinmeyen alan '%(name)s' belirtilmiş"
+
+#, python-format
+msgid "Unknown token version %s"
+msgstr "Bilinmeyen jeton sürümü %s"
+
+#, python-format
+msgid "Unregistered dependency: %(name)s for %(targets)s"
+msgstr "Kaydı silinmiş bağımlılık: %(targets)s için %(name)s"
+
+msgid "Update of `parent_id` is not allowed."
+msgstr "`parent_id` güncellemesine izin verilmiyor."
+
+msgid "Use a project scoped token when attempting to create a SAML assertion"
+msgstr ""
+"SAML iddiası oluşturma girişimi sırasında proje kapsamlı bir jeton kullan"
+
+#, python-format
+msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
+msgstr "%(u_id)s kullanıcısı %(t_id)s kiracısı için yetkilendirilmemiş"
+
+#, python-format
+msgid "User %(user_id)s already has role %(role_id)s in tenant %(tenant_id)s"
+msgstr ""
+"Kullanıcı %(user_id)s zaten %(tenant_id)s kiracısı içinde bir %(role_id)s "
+"rolüne sahip"
+
+#, python-format
+msgid "User %(user_id)s has no access to domain %(domain_id)s"
+msgstr "%(user_id)s kullanıcısının %(domain_id)s alanına erişimi yok"
+
+#, python-format
+msgid "User %(user_id)s has no access to project %(project_id)s"
+msgstr "%(user_id)s kullanıcısının %(project_id)s projesine erişimi yok"
+
+#, python-format
+msgid "User %(user_id)s is already a member of group %(group_id)s"
+msgstr "Kullanıcı %(user_id)s zaten %(group_id)s grubu üyesi"
+
+#, python-format
+msgid "User '%(user_id)s' not found in group '%(group_id)s'"
+msgstr "Kullanıcı '%(user_id)s' '%(group_id)s' grubunda bulunamadı"
+
+msgid "User IDs do not match"
+msgstr "Kullanıcı ID leri uyuşmuyor"
+
+#, python-format
+msgid "User is disabled: %s"
+msgstr "Kullanıcı kapalı: %s"
+
+msgid "User is not a member of the requested project"
+msgstr "Kullanıcı istenen projenin üyesi değil"
+
+msgid "User is not a trustee."
+msgstr "Kullanıcı güvenilir değil."
+
+msgid "User not found"
+msgstr "Kullanıcı bulunamadı"
+
+msgid "User roles not supported: tenant_id required"
+msgstr "Kullanıcı rolleri desteklenmiyor: tenant_id gerekli"
+
+#, python-format
+msgid "User type %s not supported"
+msgstr "Kullanıcı türü %s desteklenmiyor"
+
+msgid "You are not authorized to perform the requested action."
+msgstr "İstenen eylemi gerçekleştirmek için yetkili değilsiniz."
+
+#, python-format
+msgid "You are not authorized to perform the requested action: %(action)s"
+msgstr "İstenen eylemi gerçekleştirmek için yetkili değilsiniz: %(action)s"
+
+msgid "`key_mangler` functions must be callable."
+msgstr "`key_mangler` fonksiyonları çağrılabilir olmalı."
+
+msgid "`key_mangler` option must be a function reference"
+msgstr "`key_mangler` seçeneği fonksiyon referansı olmalı"
+
+msgid "any options"
+msgstr "herhangi bir seçenek"
+
+msgid "auth_type is not Negotiate"
+msgstr "auth_type Negotiate değil"
+
+msgid "authorizing user does not have role required"
+msgstr "yetkilendiren kullanıcı gerekli role sahip değil"
+
+msgid "cache_collection name is required"
+msgstr "cache_collection ismi gerekli"
+
+#, python-format
+msgid "cannot create a project in a branch containing a disabled project: %s"
+msgstr "kapalı bir proje içeren bir alt grupta proje oluşturulamaz: %s"
+
+msgid "cannot create a project within a different domain than its parents."
+msgstr "üst projelerinden farklı alanda bir proje oluşturulamaz."
+
+msgid "cannot delete a domain that is enabled, please disable it first."
+msgstr "etkin alan silinemez, lütfen önce kapatın."
+
+#, python-format
+msgid "cannot delete the project %s since it is not a leaf in the hierarchy."
+msgstr "%s projesi silinemiyor çünkü sıradüzen içindeki bir yaprak değil."
+
+#, python-format
+msgid "cannot disable project %s since its subtree contains enabled projects"
+msgstr "proje %s kapatılamıyor çünkü alt ağacında etkin projeler var"
+
+#, python-format
+msgid "cannot enable project %s since it has disabled parents"
+msgstr "proje %s etkinleştirilemiyor çünkü üstleri kapatılmış"
+
+msgid "database db_name is required"
+msgstr "veri tabanı db_name gerekli"
+
+msgid "db_hosts value is required"
+msgstr "db_hosts değeri gerekli"
+
+msgid "delete the default domain"
+msgstr "varsayılan alanı sil"
+
+#, python-format
+msgid "group %(group)s"
+msgstr "grup %(group)s"
+
+msgid ""
+"idp_contact_type must be one of: [technical, other, support, administrative "
+"or billing."
+msgstr ""
+"idp_contact_type şunlardan biri olmalı: [teknik, diğer, destek, idari veya "
+"faturalama."
+
+msgid "integer value expected for mongo_ttl_seconds"
+msgstr "mongo_ttl_seconds için tam sayı değer bekleniyor"
+
+msgid "integer value expected for w (write concern attribute)"
+msgstr "w için tam sayı değer bekleniyor (yazma ilgisi özniteliği)"
+
+#, python-format
+msgid "invalid date format %s"
+msgstr "geçersiz tarih biçimi %s"
+
+#, python-format
+msgid "max hierarchy depth reached for %s branch."
+msgstr "%s alt grubu için azami sıralı dizi derinliğine ulaşıldı."
+
+msgid "no ssl support available"
+msgstr "ssl desteği yok"
+
+#, python-format
+msgid "option %(option)s in group %(group)s"
+msgstr "%(group)s grubundaki %(option)s seçeneği"
+
+msgid "pad must be single character"
+msgstr "dolgu tek bir karakter olmalı"
+
+msgid "padded base64url text must be multiple of 4 characters"
+msgstr "dolgulanmış base64url metni 4 karakterin katı olmalı"
+
+msgid "provided consumer key does not match stored consumer key"
+msgstr "sağlanan tüketici anahtarı depolanan tüketici anahtarıyla eşleşmiyor"
+
+msgid "provided request key does not match stored request key"
+msgstr "sağlanan istek anahtarı depolanan istek anahtarıyla eşleşmiyor"
+
+msgid "provided verifier does not match stored verifier"
+msgstr "sağlanan doğrulayıcı depolanan doğrulayıcı ile eşleşmiyor"
+
+msgid "region not type dogpile.cache.CacheRegion"
+msgstr "bölge dogpile.cache.CacheRegion türünde değil"
+
+msgid "remaining_uses must be a positive integer or null."
+msgstr "remaining_uses pozitif bir değer ya da null olmalı."
+
+msgid "remaining_uses must not be set if redelegation is allowed"
+msgstr "tekrar yetkilendirmeye izin veriliyorsa remaining_uses ayarlanmamalı"
+
+msgid "replicaset_name required when use_replica is True"
+msgstr "use_replica True olduğunda replicaset_name gereklidir"
+
+#, python-format
+msgid ""
+"request to update group %(group)s, but config provided contains group "
+"%(group_other)s instead"
+msgstr ""
+"%(group)s grubunu güncelleme isteği, ama sağlanan yapılandırma "
+"%(group_other)s grubunu içeriyor"
+
+msgid "rescope a scoped token"
+msgstr "kapsamlı bir jeton tekrar kapsamlandı"
+
+#, python-format
+msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
+msgstr "metin 4'ün katı, ama dolgu \"%s\" son karaktere 2 önceden önce"
+
+#, python-format
+msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
+msgstr ""
+"metin 4'ün katı, ama doldurma \"%s\" doldurma karakteri olmayan son "
+"karakterden önce"
+
+#, python-format
+msgid "text is not a multiple of 4, but contains pad \"%s\""
+msgstr "metin 4'ün katı değil, ama \"%s\" dolgusu içeriyor"
+
+#, python-format
+msgid "tls_cacertdir %s not found or is not a directory"
+msgstr "tls_cacertdir %s bulunamadı ya da bir dizin değil"
+
+#, python-format
+msgid "tls_cacertfile %s not found or is not a file"
+msgstr "tls_cacertfile %s bulunamadı ya da bir dosya değil"
+
+#, python-format
+msgid "token reference must be a KeystoneToken type, got: %s"
+msgstr "jeton referansı bir KeystoneToken türünde olmalı, alınan: %s"
diff --git a/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-critical.po
index a3a728e9..cbdab8a4 100644
--- a/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-critical.po
@@ -1,5 +1,5 @@
# Translations template for keystone.
-# Copyright (C) 2014 OpenStack Foundation
+# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
# Translators:
@@ -7,19 +7,18 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
"PO-Revision-Date: 2014-08-31 15:19+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/keystone/"
+"Language-Team: Chinese (China) (http://www.transifex.com/openstack/keystone/"
"language/zh_CN/)\n"
"Language: zh_CN\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
"Plural-Forms: nplurals=1; plural=0;\n"
-#: keystone/catalog/backends/templated.py:106
#, python-format
msgid "Unable to open template file %s"
msgstr "无法打开模板文件 %s"
diff --git a/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-error.po b/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-error.po
index a48b9382..da273412 100644
--- a/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-error.po
+++ b/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-error.po
@@ -4,71 +4,57 @@
#
# Translators:
# Xiao Xi LIU <liuxx@cn.ibm.com>, 2014
+# 刘俊朋 <liujunpeng@inspur.com>, 2015
msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-03-09 06:03+0000\n"
-"PO-Revision-Date: 2015-03-07 04:31+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
+"PO-Revision-Date: 2015-06-26 17:13+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/keystone/"
+"Language-Team: Chinese (China) (http://www.transifex.com/openstack/keystone/"
"language/zh_CN/)\n"
"Language: zh_CN\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
"Plural-Forms: nplurals=1; plural=0;\n"
-#: keystone/notifications.py:304
-msgid "Failed to construct notifier"
-msgstr ""
-
-#: keystone/notifications.py:389
-#, python-format
-msgid "Failed to send %(res_id)s %(event_type)s notification"
-msgstr ""
-
-#: keystone/notifications.py:606
-#, python-format
-msgid "Failed to send %(action)s %(event_type)s notification"
-msgstr ""
-
-#: keystone/catalog/core.py:62
-#, python-format
-msgid "Malformed endpoint - %(url)r is not a string"
-msgstr ""
+msgid "Cannot retrieve Authorization headers"
+msgstr "无法获取认证头信息"
-#: keystone/catalog/core.py:66
#, python-format
-msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s"
-msgstr "端点 %(url)s 的格式不正确 - 键 %(keyerror)s 未知"
+msgid ""
+"Circular reference or a repeated entry found in projects hierarchy - "
+"%(project_id)s."
+msgstr "在项目树-%(project_id)s 中发现循环引用或重复项。"
-#: keystone/catalog/core.py:71
#, python-format
msgid ""
-"Malformed endpoint '%(url)s'. The following type error occurred during "
-"string substitution: %(typeerror)s"
-msgstr ""
-"端点 '%(url)s' 的格式不正确。在字符串替换时发生以下类型错误:%(typeerror)s"
+"Circular reference or a repeated entry found in region tree - %(region_id)s."
+msgstr "在域树- %(region_id)s 中发现循环引用或重复项。"
-#: keystone/catalog/core.py:77
#, python-format
msgid ""
-"Malformed endpoint %s - incomplete format (are you missing a type notifier ?)"
-msgstr "端点 %s 的格式不完整 - (是否缺少了类型通告者?)"
+"Circular reference or a repeated entry found projects hierarchy - "
+"%(project_id)s."
+msgstr "在项目树-%(project_id)s 中发现循环引用或重复项。"
-#: keystone/common/openssl.py:93
#, python-format
msgid "Command %(to_exec)s exited with %(retcode)s- %(output)s"
msgstr "命令 %(to_exec)s 已退出,退出码及输出为 %(retcode)s- %(output)s"
-#: keystone/common/openssl.py:121
#, python-format
-msgid "Failed to remove file %(file_path)r: %(error)s"
-msgstr "无法删除文件%(file_path)r: %(error)s"
+msgid "Could not bind to %(host)s:%(port)s"
+msgstr "无法绑定至 %(host)s:%(port)s"
+
+#, python-format
+msgid ""
+"Either [fernet_tokens] key_repository does not exist or Keystone does not "
+"have sufficient permission to access it: %s"
+msgstr "[fernet_tokens] 键仓库不存在或者Keystone没有足够的权限去访问它: %s。"
-#: keystone/common/utils.py:239
msgid ""
"Error setting up the debug environment. Verify that the option --debug-url "
"has the format <host>:<port> and that a debugger processes is listening on "
@@ -77,101 +63,82 @@ msgstr ""
"设置调试环境出错。请确保选项--debug-url 的格式是这样的<host>:<port> ,和确保"
"有一个调试进程正在监听那个端口"
-#: keystone/common/cache/core.py:100
#, python-format
-msgid ""
-"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
-"Skipping unknown format: %s"
-msgstr ""
-
-#: keystone/common/environment/eventlet_server.py:99
-#, python-format
-msgid "Could not bind to %(host)s:%(port)s"
-msgstr "无法绑定至 %(host)s:%(port)s"
+msgid "Error when signing assertion, reason: %(reason)s"
+msgstr "对断言进行签名时出错,原因:%(reason)s"
-#: keystone/common/environment/eventlet_server.py:185
-msgid "Server error"
-msgstr "服务器报错"
+msgid "Failed to construct notifier"
+msgstr "构造通知器失败"
-#: keystone/contrib/endpoint_policy/core.py:129
-#: keystone/contrib/endpoint_policy/core.py:228
-#, python-format
msgid ""
-"Circular reference or a repeated entry found in region tree - %(region_id)s."
-msgstr "在域树- %(region_id)s 中发现循环引用或重复项。"
+"Failed to create [fernet_tokens] key_repository: either it already exists or "
+"you don't have sufficient permissions to create it"
+msgstr "创建[fernet_tokens] 键仓库失败:它已存在或你没有足够的权限去创建它。"
-#: keystone/contrib/federation/idp.py:410
#, python-format
-msgid "Error when signing assertion, reason: %(reason)s"
-msgstr "对断言进行签名时出错,原因:%(reason)s"
-
-#: keystone/contrib/oauth1/core.py:136
-msgid "Cannot retrieve Authorization headers"
-msgstr ""
+msgid "Failed to remove file %(file_path)r: %(error)s"
+msgstr "无法删除文件%(file_path)r: %(error)s"
-#: keystone/openstack/common/loopingcall.py:95
-msgid "in fixed duration looping call"
-msgstr "在固定时段内循环调用"
+#, python-format
+msgid "Failed to send %(action)s %(event_type)s notification"
+msgstr "发送 %(action)s %(event_type)s 通知失败"
-#: keystone/openstack/common/loopingcall.py:138
-msgid "in dynamic looping call"
-msgstr "在动态循环调用中"
+#, python-format
+msgid "Failed to send %(res_id)s %(event_type)s notification"
+msgstr "发送%(res_id)s %(event_type)s 通知失败"
-#: keystone/openstack/common/service.py:268
-msgid "Unhandled exception"
-msgstr "存在未处理的异常"
+msgid "Failed to validate token"
+msgstr "token验证失败"
-#: keystone/resource/core.py:477
#, python-format
-msgid ""
-"Circular reference or a repeated entry found projects hierarchy - "
-"%(project_id)s."
-msgstr ""
+msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s"
+msgstr "端点 %(url)s 的格式不正确 - 键 %(keyerror)s 未知"
-#: keystone/resource/core.py:939
#, python-format
msgid ""
-"Unexpected results in response for domain config - %(count)s responses, "
-"first option is %(option)s, expected option %(expected)s"
-msgstr ""
+"Malformed endpoint %s - incomplete format (are you missing a type notifier ?)"
+msgstr "端点 %s 的格式不完整 - (是否缺少了类型通告者?)"
-#: keystone/resource/backends/sql.py:102 keystone/resource/backends/sql.py:121
#, python-format
msgid ""
-"Circular reference or a repeated entry found in projects hierarchy - "
-"%(project_id)s."
+"Malformed endpoint '%(url)s'. The following type error occurred during "
+"string substitution: %(typeerror)s"
msgstr ""
+"端点 '%(url)s' 的格式不正确。在字符串替换时发生以下类型错误:%(typeerror)s"
-#: keystone/token/provider.py:292
#, python-format
-msgid "Unexpected error or malformed token determining token expiry: %s"
-msgstr ""
+msgid "Malformed endpoint - %(url)r is not a string"
+msgstr "端点 - %(url)r 不是一个字符串"
-#: keystone/token/persistence/backends/kvs.py:226
#, python-format
msgid ""
"Reinitializing revocation list due to error in loading revocation list from "
"backend. Expected `list` type got `%(type)s`. Old revocation list data: "
"%(list)r"
msgstr ""
+"由于从后端加载撤销列表出现错误,重新初始化撤销列表。期望 `list` 类型,得到 `"
+"%(type)s`。旧的撤销列表数据是: %(list)r"
-#: keystone/token/providers/common.py:611
-msgid "Failed to validate token"
-msgstr "token验证失败"
+msgid "Server error"
+msgstr "服务器报错"
+
+#, python-format
+msgid ""
+"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
+"Skipping unknown format: %s"
+msgstr "无法构建缓存配置键值对。期望格式“<参数>:<值>”。跳过未知的格式: %s"
-#: keystone/token/providers/pki.py:47
msgid "Unable to sign token"
-msgstr ""
+msgstr "无法签名令牌"
-#: keystone/token/providers/fernet/utils.py:38
#, python-format
-msgid ""
-"Either [fernet_tokens] key_repository does not exist or Keystone does not "
-"have sufficient permission to access it: %s"
-msgstr ""
+msgid "Unexpected error or malformed token determining token expiry: %s"
+msgstr "确定令牌到期时间时出现意外错误或格式错误的令牌:%s"
-#: keystone/token/providers/fernet/utils.py:79
+#, python-format
msgid ""
-"Failed to create [fernet_tokens] key_repository: either it already exists or "
-"you don't have sufficient permissions to create it"
+"Unexpected results in response for domain config - %(count)s responses, "
+"first option is %(option)s, expected option %(expected)s"
msgstr ""
+"域配置响应中出现意外结果 - %(count)s 个响应,第一个选项是 %(option)s,期望选"
+"项是 %(expected)s。"
diff --git a/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-info.po b/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-info.po
index 0e848ee1..92f06dcb 100644
--- a/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-info.po
+++ b/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-info.po
@@ -8,33 +8,18 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-03-09 06:03+0000\n"
-"PO-Revision-Date: 2015-03-07 08:47+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
+"PO-Revision-Date: 2015-08-01 06:26+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/keystone/"
+"Language-Team: Chinese (China) (http://www.transifex.com/openstack/keystone/"
"language/zh_CN/)\n"
"Language: zh_CN\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
"Plural-Forms: nplurals=1; plural=0;\n"
-#: keystone/assignment/core.py:250
-#, python-format
-msgid "Creating the default role %s because it does not exist."
-msgstr "正在创建默认角色%s,因为它之前不存在。"
-
-#: keystone/assignment/core.py:258
-#, python-format
-msgid "Creating the default role %s failed because it was already created"
-msgstr ""
-
-#: keystone/auth/controllers.py:64
-msgid "Loading auth-plugins by class-name is deprecated."
-msgstr "通过class-name(类名)加载auth-plugins(认证插件)的方式已被弃用。"
-
-#: keystone/auth/controllers.py:106
#, python-format
msgid ""
"\"expires_at\" has conflicting values %(existing)s and %(new)s. Will use "
@@ -43,173 +28,55 @@ msgstr ""
"\"expires_at\" 被赋予矛盾的值: %(existing)s 和 %(new)s。将采用时间上较早的那"
"个值。"
-#: keystone/common/openssl.py:81
-#, python-format
-msgid "Running command - %s"
-msgstr "正在运行命令 - %s"
-
-#: keystone/common/wsgi.py:79
-msgid "No bind information present in token"
-msgstr "令牌中暂无绑定信息"
-
-#: keystone/common/wsgi.py:83
-#, python-format
-msgid "Named bind mode %s not in bind information"
-msgstr "在绑定信息中没有命名绑定模式%s"
-
-#: keystone/common/wsgi.py:90
-msgid "Kerberos credentials required and not present"
-msgstr "没有所需的Kerberos凭证"
-
-#: keystone/common/wsgi.py:94
-msgid "Kerberos credentials do not match those in bind"
-msgstr "在绑定中没有匹配的Kerberos凭证"
-
-#: keystone/common/wsgi.py:98
-msgid "Kerberos bind authentication successful"
-msgstr "Kerberos绑定认证成功"
-
-#: keystone/common/wsgi.py:105
-#, python-format
-msgid "Couldn't verify unknown bind: {%(bind_type)s: %(identifier)s}"
-msgstr "不能验证未知绑定: {%(bind_type)s: %(identifier)s}"
-
-#: keystone/common/environment/eventlet_server.py:103
-#, python-format
-msgid "Starting %(arg0)s on %(host)s:%(port)s"
-msgstr "正在 %(host)s:%(port)s 上启动 %(arg0)s"
-
-#: keystone/common/kvs/core.py:138
#, python-format
msgid "Adding proxy '%(proxy)s' to KVS %(name)s."
msgstr "正在将代理'%(proxy)s'加入KVS %(name)s 中。"
-#: keystone/common/kvs/core.py:188
#, python-format
-msgid "Using %(func)s as KVS region %(name)s key_mangler"
-msgstr "使用 %(func)s 作为KVS域 %(name)s 的key_mangler处理函数"
+msgid "Couldn't verify unknown bind: {%(bind_type)s: %(identifier)s}"
+msgstr "不能验证未知绑定: {%(bind_type)s: %(identifier)s}"
-#: keystone/common/kvs/core.py:200
#, python-format
-msgid "Using default dogpile sha1_mangle_key as KVS region %s key_mangler"
-msgstr ""
-"使用默认的dogpile sha1_mangle_key函数作为KVS域 %s 的key_mangler处理函数"
+msgid "Creating the default role %s because it does not exist."
+msgstr "正在创建默认角色%s,因为它之前不存在。"
-#: keystone/common/kvs/core.py:210
#, python-format
msgid "KVS region %s key_mangler disabled."
msgstr "KVS域 %s 的key_mangler处理函数被禁用。"
-#: keystone/contrib/example/core.py:64 keystone/contrib/example/core.py:73
-#, python-format
-msgid ""
-"Received the following notification: service %(service)s, resource_type: "
-"%(resource_type)s, operation %(operation)s payload %(payload)s"
-msgstr ""
-
-#: keystone/openstack/common/eventlet_backdoor.py:146
-#, python-format
-msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
-msgstr "携程为进程 %(pid)d 在后台监听 %(port)s "
-
-#: keystone/openstack/common/service.py:173
-#, python-format
-msgid "Caught %s, exiting"
-msgstr "捕获到 %s,正在退出"
-
-#: keystone/openstack/common/service.py:231
-msgid "Parent process has died unexpectedly, exiting"
-msgstr "父进程已意外终止,正在退出"
-
-#: keystone/openstack/common/service.py:262
-#, python-format
-msgid "Child caught %s, exiting"
-msgstr "子代捕获 %s,正在退出"
-
-#: keystone/openstack/common/service.py:301
-msgid "Forking too fast, sleeping"
-msgstr "派生速度太快,正在休眠"
+msgid "Kerberos bind authentication successful"
+msgstr "Kerberos绑定认证成功"
-#: keystone/openstack/common/service.py:320
-#, python-format
-msgid "Started child %d"
-msgstr "已启动子代 %d"
+msgid "Kerberos credentials do not match those in bind"
+msgstr "在绑定中没有匹配的Kerberos凭证"
-#: keystone/openstack/common/service.py:330
-#, python-format
-msgid "Starting %d workers"
-msgstr "正在启动 %d 工作程序"
+msgid "Kerberos credentials required and not present"
+msgstr "没有所需的Kerberos凭证"
-#: keystone/openstack/common/service.py:347
#, python-format
-msgid "Child %(pid)d killed by signal %(sig)d"
-msgstr "信号 %(sig)d 已终止子代 %(pid)d"
+msgid "Named bind mode %s not in bind information"
+msgstr "在绑定信息中没有命名绑定模式%s"
-#: keystone/openstack/common/service.py:351
-#, python-format
-msgid "Child %(pid)s exited with status %(code)d"
-msgstr "子代 %(pid)s 已退出,状态为 %(code)d"
+msgid "No bind information present in token"
+msgstr "令牌中暂无绑定信息"
-#: keystone/openstack/common/service.py:390
#, python-format
-msgid "Caught %s, stopping children"
-msgstr "捕获到 %s,正在停止子代"
-
-#: keystone/openstack/common/service.py:399
-msgid "Wait called after thread killed. Cleaning up."
-msgstr "线程结束,正在清理"
+msgid "Running command - %s"
+msgstr "正在运行命令 - %s"
-#: keystone/openstack/common/service.py:415
#, python-format
-msgid "Waiting on %d children to exit"
-msgstr "正在等待 %d 个子代退出"
+msgid "Starting %(arg0)s on %(host)s:%(port)s"
+msgstr "正在 %(host)s:%(port)s 上启动 %(arg0)s"
-#: keystone/token/persistence/backends/sql.py:279
#, python-format
msgid "Total expired tokens removed: %d"
msgstr "被移除的失效令牌总数:%d"
-#: keystone/token/providers/fernet/utils.py:72
-msgid ""
-"[fernet_tokens] key_repository does not appear to exist; attempting to "
-"create it"
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:130
-#, python-format
-msgid "Created a new key: %s"
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:143
-msgid "Key repository is already initialized; aborting."
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:179
-#, python-format
-msgid "Starting key rotation with %(count)s key files: %(list)s"
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:185
-#, python-format
-msgid "Current primary key is: %s"
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:187
-#, python-format
-msgid "Next primary key will be: %s"
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:197
-#, python-format
-msgid "Promoted key 0 to be the primary: %s"
-msgstr ""
-
-#: keystone/token/providers/fernet/utils.py:213
#, python-format
-msgid "Excess keys to purge: %s"
-msgstr ""
+msgid "Using %(func)s as KVS region %(name)s key_mangler"
+msgstr "使用 %(func)s 作为KVS域 %(name)s 的key_mangler处理函数"
-#: keystone/token/providers/fernet/utils.py:237
#, python-format
-msgid "Loaded %(count)s encryption keys from: %(dir)s"
+msgid "Using default dogpile sha1_mangle_key as KVS region %s key_mangler"
msgstr ""
+"使用默认的dogpile sha1_mangle_key函数作为KVS域 %s 的key_mangler处理函数"
diff --git a/keystone-moon/keystone/locale/zh_TW/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/zh_TW/LC_MESSAGES/keystone-log-critical.po
index b0ff57c9..c2e8b9ea 100644
--- a/keystone-moon/keystone/locale/zh_TW/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/zh_TW/LC_MESSAGES/keystone-log-critical.po
@@ -1,5 +1,5 @@
# Translations template for keystone.
-# Copyright (C) 2014 OpenStack Foundation
+# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
# Translators:
@@ -7,19 +7,18 @@ msgid ""
msgstr ""
"Project-Id-Version: Keystone\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"POT-Creation-Date: 2015-08-06 06:28+0000\n"
"PO-Revision-Date: 2014-08-31 15:19+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Chinese (Taiwan) (http://www.transifex.com/projects/p/"
-"keystone/language/zh_TW/)\n"
+"Language-Team: Chinese (Taiwan) (http://www.transifex.com/openstack/keystone/"
+"language/zh_TW/)\n"
"Language: zh_TW\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
+"Generated-By: Babel 2.0\n"
"Plural-Forms: nplurals=1; plural=0;\n"
-#: keystone/catalog/backends/templated.py:106
#, python-format
msgid "Unable to open template file %s"
msgstr "無法開啟範本檔 %s"
diff --git a/keystone-moon/keystone/middleware/core.py b/keystone-moon/keystone/middleware/core.py
index bf86cd2b..62ff291a 100644
--- a/keystone-moon/keystone/middleware/core.py
+++ b/keystone-moon/keystone/middleware/core.py
@@ -14,16 +14,16 @@
from oslo_config import cfg
from oslo_log import log
+from oslo_log import versionutils
from oslo_middleware import sizelimit
from oslo_serialization import jsonutils
-import six
from keystone.common import authorization
from keystone.common import wsgi
from keystone import exception
from keystone.i18n import _LW
from keystone.models import token_model
-from keystone.openstack.common import versionutils
+
CONF = cfg.CONF
LOG = log.getLogger(__name__)
@@ -51,8 +51,7 @@ class TokenAuthMiddleware(wsgi.Middleware):
context = request.environ.get(CONTEXT_ENV, {})
context['token_id'] = token
if SUBJECT_TOKEN_HEADER in request.headers:
- context['subject_token_id'] = (
- request.headers.get(SUBJECT_TOKEN_HEADER))
+ context['subject_token_id'] = request.headers[SUBJECT_TOKEN_HEADER]
request.environ[CONTEXT_ENV] = context
@@ -82,7 +81,7 @@ class PostParamsMiddleware(wsgi.Middleware):
def process_request(self, request):
params_parsed = request.params
params = {}
- for k, v in six.iteritems(params_parsed):
+ for k, v in params_parsed.items():
if k in ('self', 'context'):
continue
if k.startswith('_'):
@@ -132,7 +131,7 @@ class JsonBodyMiddleware(wsgi.Middleware):
return wsgi.render_exception(e, request=request)
params = {}
- for k, v in six.iteritems(params_parsed):
+ for k, v in params_parsed.items():
if k in ('self', 'context'):
continue
if k.startswith('_'):
@@ -142,35 +141,6 @@ class JsonBodyMiddleware(wsgi.Middleware):
request.environ[PARAMS_ENV] = params
-class XmlBodyMiddleware(wsgi.Middleware):
- """De/serialize XML to/from JSON."""
-
- def print_warning(self):
- LOG.warning(_LW('XML support has been removed as of the Kilo release '
- 'and should not be referenced or used in deployment. '
- 'Please remove references to XmlBodyMiddleware from '
- 'your configuration. This compatibility stub will be '
- 'removed in the L release'))
-
- def __init__(self, *args, **kwargs):
- super(XmlBodyMiddleware, self).__init__(*args, **kwargs)
- self.print_warning()
-
-
-class XmlBodyMiddlewareV2(XmlBodyMiddleware):
- """De/serialize XML to/from JSON for v2.0 API."""
-
- def __init__(self, *args, **kwargs):
- pass
-
-
-class XmlBodyMiddlewareV3(XmlBodyMiddleware):
- """De/serialize XML to/from JSON for v3 API."""
-
- def __init__(self, *args, **kwargs):
- pass
-
-
class NormalizingFilter(wsgi.Middleware):
"""Middleware filter to handle URL normalization."""
diff --git a/keystone-moon/keystone/models/token_model.py b/keystone-moon/keystone/models/token_model.py
index 3be22b96..2032fd19 100644
--- a/keystone-moon/keystone/models/token_model.py
+++ b/keystone-moon/keystone/models/token_model.py
@@ -17,7 +17,7 @@ from oslo_config import cfg
from oslo_utils import timeutils
import six
-from keystone.contrib import federation
+from keystone.contrib.federation import constants as federation_constants
from keystone import exception
from keystone.i18n import _
@@ -296,7 +296,8 @@ class KeystoneToken(dict):
@property
def is_federated_user(self):
try:
- return self.version is V3 and federation.FEDERATION in self['user']
+ return (self.version is V3 and
+ federation_constants.FEDERATION in self['user'])
except KeyError:
raise exception.UnexpectedError()
@@ -305,7 +306,7 @@ class KeystoneToken(dict):
if self.is_federated_user:
if self.version is V3:
try:
- groups = self['user'][federation.FEDERATION].get(
+ groups = self['user'][federation_constants.FEDERATION].get(
'groups', [])
return [g['id'] for g in groups]
except KeyError:
@@ -316,12 +317,15 @@ class KeystoneToken(dict):
def federation_idp_id(self):
if self.version is not V3 or not self.is_federated_user:
return None
- return self['user'][federation.FEDERATION]['identity_provider']['id']
+ return (
+ self['user'][federation_constants.FEDERATION]
+ ['identity_provider']['id'])
@property
def federation_protocol_id(self):
if self.version is V3 and self.is_federated_user:
- return self['user'][federation.FEDERATION]['protocol']['id']
+ return (self['user'][federation_constants.FEDERATION]['protocol']
+ ['id'])
return None
@property
diff --git a/keystone-moon/keystone/notifications.py b/keystone-moon/keystone/notifications.py
index 4a1cd333..801dd737 100644
--- a/keystone-moon/keystone/notifications.py
+++ b/keystone-moon/keystone/notifications.py
@@ -15,12 +15,14 @@
"""Notifications module for OpenStack Identity Service resources"""
import collections
+import functools
import inspect
import logging
import socket
from oslo_config import cfg
from oslo_log import log
+from oslo_log import versionutils
import oslo_messaging
import pycadf
from pycadf import cadftaxonomy as taxonomy
@@ -36,12 +38,12 @@ notifier_opts = [
cfg.StrOpt('default_publisher_id',
help='Default publisher_id for outgoing notifications'),
cfg.StrOpt('notification_format', default='basic',
+ choices=['basic', 'cadf'],
help='Define the notification format for Identity Service '
'events. A "basic" notification has information about '
'the resource being operated on. A "cadf" notification '
'has the same information, as well as information about '
- 'the initiator of the event. Valid options are: basic '
- 'and cadf'),
+ 'the initiator of the event.'),
]
config_section = None
@@ -55,6 +57,7 @@ _ACTIONS = collections.namedtuple(
'created, deleted, disabled, updated, internal')
ACTIONS = _ACTIONS(created='created', deleted='deleted', disabled='disabled',
updated='updated', internal='internal')
+"""The actions on resources."""
CADF_TYPE_MAP = {
'group': taxonomy.SECURITY_GROUP,
@@ -291,6 +294,54 @@ def register_event_callback(event, resource_type, callbacks):
LOG.debug(msg, {'callback': callback_str, 'event': event_str})
+def listener(cls):
+ """A class decorator to declare a class to be a notification listener.
+
+ A notification listener must specify the event(s) it is interested in by
+ defining a ``event_callbacks`` attribute or property. ``event_callbacks``
+ is a dictionary where the key is the type of event and the value is a
+ dictionary containing a mapping of resource types to callback(s).
+
+ :data:`.ACTIONS` contains constants for the currently
+ supported events. There is currently no single place to find constants for
+ the resource types.
+
+ Example::
+
+ @listener
+ class Something(object):
+
+ def __init__(self):
+ self.event_callbacks = {
+ notifications.ACTIONS.created: {
+ 'user': self._user_created_callback,
+ },
+ notifications.ACTIONS.deleted: {
+ 'project': [
+ self._project_deleted_callback,
+ self._do_cleanup,
+ ]
+ },
+ }
+
+ """
+
+ def init_wrapper(init):
+ @functools.wraps(init)
+ def __new_init__(self, *args, **kwargs):
+ init(self, *args, **kwargs)
+ _register_event_callbacks(self)
+ return __new_init__
+
+ def _register_event_callbacks(self):
+ for event, resource_types in self.event_callbacks.items():
+ for resource_type, callbacks in resource_types.items():
+ register_event_callback(event, resource_type, callbacks)
+
+ cls.__init__ = init_wrapper(cls.__init__)
+ return cls
+
+
def notify_event_callbacks(service, resource_type, operation, payload):
"""Sends a notification to registered extensions."""
if operation in _SUBSCRIBERS:
@@ -524,8 +575,10 @@ class CadfRoleAssignmentNotificationWrapper(object):
def __init__(self, operation):
self.action = '%s.%s' % (operation, self.ROLE_ASSIGNMENT)
- self.event_type = '%s.%s.%s' % (SERVICE, operation,
- self.ROLE_ASSIGNMENT)
+ self.deprecated_event_type = '%s.%s.%s' % (SERVICE, operation,
+ self.ROLE_ASSIGNMENT)
+ self.event_type = '%s.%s.%s' % (SERVICE, self.ROLE_ASSIGNMENT,
+ operation)
def __call__(self, f):
def wrapper(wrapped_self, role_id, *args, **kwargs):
@@ -581,19 +634,30 @@ class CadfRoleAssignmentNotificationWrapper(object):
audit_kwargs['inherited_to_projects'] = inherited
audit_kwargs['role'] = role_id
+ # For backward compatibility, send both old and new event_type.
+ # Deprecate old format and remove it in the next release.
+ event_types = [self.deprecated_event_type, self.event_type]
+ versionutils.deprecated(
+ as_of=versionutils.deprecated.KILO,
+ remove_in=+1,
+ what=('sending duplicate %s notification event type' %
+ self.deprecated_event_type),
+ in_favor_of='%s notification event type' % self.event_type)
try:
result = f(wrapped_self, role_id, *args, **kwargs)
except Exception:
- _send_audit_notification(self.action, initiator,
- taxonomy.OUTCOME_FAILURE,
- target, self.event_type,
- **audit_kwargs)
+ for event_type in event_types:
+ _send_audit_notification(self.action, initiator,
+ taxonomy.OUTCOME_FAILURE,
+ target, event_type,
+ **audit_kwargs)
raise
else:
- _send_audit_notification(self.action, initiator,
- taxonomy.OUTCOME_SUCCESS,
- target, self.event_type,
- **audit_kwargs)
+ for event_type in event_types:
+ _send_audit_notification(self.action, initiator,
+ taxonomy.OUTCOME_SUCCESS,
+ target, event_type,
+ **audit_kwargs)
return result
return wrapper
diff --git a/keystone-moon/keystone/policy/core.py b/keystone-moon/keystone/policy/core.py
index 1f02803f..7943b59e 100644
--- a/keystone-moon/keystone/policy/core.py
+++ b/keystone-moon/keystone/policy/core.py
@@ -36,6 +36,9 @@ class Manager(manager.Manager):
dynamically calls the backend.
"""
+
+ driver_namespace = 'keystone.policy'
+
_POLICY = 'policy'
def __init__(self):
diff --git a/keystone-moon/keystone/resource/backends/ldap.py b/keystone-moon/keystone/resource/backends/ldap.py
index 434c2b04..43684035 100644
--- a/keystone-moon/keystone/resource/backends/ldap.py
+++ b/keystone-moon/keystone/resource/backends/ldap.py
@@ -17,7 +17,7 @@ import uuid
from oslo_config import cfg
from oslo_log import log
-from keystone import clean
+from keystone.common import clean
from keystone.common import driver_hints
from keystone.common import ldap as common_ldap
from keystone.common import models
@@ -47,7 +47,7 @@ class Resource(resource.Driver):
self.project = ProjectApi(CONF)
def default_assignment_driver(self):
- return 'keystone.assignment.backends.ldap.Assignment'
+ return 'ldap'
def _set_default_parent_project(self, ref):
"""If the parent project ID has not been set, set it to None."""
@@ -60,6 +60,14 @@ class Resource(resource.Driver):
else:
raise ValueError(_('Expected dict or list: %s') % type(ref))
+ def _set_default_is_domain_project(self, ref):
+ if isinstance(ref, dict):
+ return dict(ref, is_domain=False)
+ elif isinstance(ref, list):
+ return [self._set_default_is_domain_project(x) for x in ref]
+ else:
+ raise ValueError(_('Expected dict or list: %s') % type(ref))
+
def _validate_parent_project_is_none(self, ref):
"""If a parent_id different from None was given,
raises InvalidProjectException.
@@ -69,8 +77,15 @@ class Resource(resource.Driver):
if parent_id is not None:
raise exception.InvalidParentProject(parent_id)
+ def _validate_is_domain_field_is_false(self, ref):
+ is_domain = ref.pop('is_domain', None)
+ if is_domain:
+ raise exception.ValidationError(_('LDAP does not support projects '
+ 'with is_domain flag enabled'))
+
def _set_default_attributes(self, project_ref):
project_ref = self._set_default_domain(project_ref)
+ project_ref = self._set_default_is_domain_project(project_ref)
return self._set_default_parent_project(project_ref)
def get_project(self, tenant_id):
@@ -116,8 +131,8 @@ class Resource(resource.Driver):
def create_project(self, tenant_id, tenant):
self.project.check_allow_create()
- tenant = self._validate_default_domain(tenant)
self._validate_parent_project_is_none(tenant)
+ self._validate_is_domain_field_is_false(tenant)
tenant['name'] = clean.project_name(tenant['name'])
data = tenant.copy()
if 'id' not in data or data['id'] is None:
@@ -130,6 +145,7 @@ class Resource(resource.Driver):
def update_project(self, tenant_id, tenant):
self.project.check_allow_update()
tenant = self._validate_default_domain(tenant)
+ self._validate_is_domain_field_is_false(tenant)
if 'name' in tenant:
tenant['name'] = clean.project_name(tenant['name'])
return self._set_default_attributes(
diff --git a/keystone-moon/keystone/resource/backends/sql.py b/keystone-moon/keystone/resource/backends/sql.py
index fb117240..3a0d8cea 100644
--- a/keystone-moon/keystone/resource/backends/sql.py
+++ b/keystone-moon/keystone/resource/backends/sql.py
@@ -13,7 +13,7 @@
from oslo_config import cfg
from oslo_log import log
-from keystone import clean
+from keystone.common import clean
from keystone.common import sql
from keystone import exception
from keystone.i18n import _LE
@@ -27,7 +27,7 @@ LOG = log.getLogger(__name__)
class Resource(keystone_resource.Driver):
def default_assignment_driver(self):
- return 'keystone.assignment.backends.sql.Assignment'
+ return 'sql'
def _get_project(self, session, project_id):
project_ref = session.query(Project).get(project_id)
@@ -91,10 +91,9 @@ class Resource(keystone_resource.Driver):
def list_projects_in_subtree(self, project_id):
with sql.transaction() as session:
- project = self._get_project(session, project_id).to_dict()
- children = self._get_children(session, [project['id']])
+ children = self._get_children(session, [project_id])
subtree = []
- examined = set(project['id'])
+ examined = set([project_id])
while children:
children_ids = set()
for ref in children:
@@ -106,7 +105,7 @@ class Resource(keystone_resource.Driver):
return
children_ids.add(ref['id'])
- examined.union(children_ids)
+ examined.update(children_ids)
subtree += children
children = self._get_children(session, children_ids)
return subtree
@@ -246,7 +245,7 @@ class Domain(sql.ModelBase, sql.DictBase):
class Project(sql.ModelBase, sql.DictBase):
__tablename__ = 'project'
attributes = ['id', 'name', 'domain_id', 'description', 'enabled',
- 'parent_id']
+ 'parent_id', 'is_domain']
id = sql.Column(sql.String(64), primary_key=True)
name = sql.Column(sql.String(64), nullable=False)
domain_id = sql.Column(sql.String(64), sql.ForeignKey('domain.id'),
@@ -255,6 +254,7 @@ class Project(sql.ModelBase, sql.DictBase):
enabled = sql.Column(sql.Boolean)
extra = sql.Column(sql.JsonBlob())
parent_id = sql.Column(sql.String(64), sql.ForeignKey('project.id'))
+ is_domain = sql.Column(sql.Boolean, default=False, nullable=False)
# Unique constraint across two columns to create the separation
# rather than just only 'name' being unique
__table_args__ = (sql.UniqueConstraint('domain_id', 'name'), {})
diff --git a/keystone-moon/keystone/resource/controllers.py b/keystone-moon/keystone/resource/controllers.py
index 886b5eb1..60c4e025 100644
--- a/keystone-moon/keystone/resource/controllers.py
+++ b/keystone-moon/keystone/resource/controllers.py
@@ -47,27 +47,37 @@ class Tenant(controller.V2Controller):
self.assert_admin(context)
tenant_refs = self.resource_api.list_projects_in_domain(
CONF.identity.default_domain_id)
- for tenant_ref in tenant_refs:
- tenant_ref = self.filter_domain_id(tenant_ref)
+ tenant_refs = [self.v3_to_v2_project(tenant_ref)
+ for tenant_ref in tenant_refs
+ if not tenant_ref.get('is_domain')]
params = {
'limit': context['query_string'].get('limit'),
'marker': context['query_string'].get('marker'),
}
return self.format_project_list(tenant_refs, **params)
+ def _assert_not_is_domain_project(self, project_id, project_ref=None):
+ # Projects acting as a domain should not be visible via v2
+ if not project_ref:
+ project_ref = self.resource_api.get_project(project_id)
+ if project_ref.get('is_domain'):
+ raise exception.ProjectNotFound(project_id)
+
@controller.v2_deprecated
def get_project(self, context, tenant_id):
# TODO(termie): this stuff should probably be moved to middleware
self.assert_admin(context)
ref = self.resource_api.get_project(tenant_id)
- return {'tenant': self.filter_domain_id(ref)}
+ self._assert_not_is_domain_project(tenant_id, ref)
+ return {'tenant': self.v3_to_v2_project(ref)}
@controller.v2_deprecated
def get_project_by_name(self, context, tenant_name):
self.assert_admin(context)
+ # Projects acting as a domain should not be visible via v2
ref = self.resource_api.get_project_by_name(
tenant_name, CONF.identity.default_domain_id)
- return {'tenant': self.filter_domain_id(ref)}
+ return {'tenant': self.v3_to_v2_project(ref)}
# CRUD Extension
@controller.v2_deprecated
@@ -83,23 +93,25 @@ class Tenant(controller.V2Controller):
tenant = self.resource_api.create_project(
tenant_ref['id'],
self._normalize_domain_id(context, tenant_ref))
- return {'tenant': self.filter_domain_id(tenant)}
+ return {'tenant': self.v3_to_v2_project(tenant)}
@controller.v2_deprecated
def update_project(self, context, tenant_id, tenant):
self.assert_admin(context)
- # Remove domain_id if specified - a v2 api caller should not
- # be specifying that
+ self._assert_not_is_domain_project(tenant_id)
+ # Remove domain_id and is_domain if specified - a v2 api caller
+ # should not be specifying that
clean_tenant = tenant.copy()
clean_tenant.pop('domain_id', None)
-
+ clean_tenant.pop('is_domain', None)
tenant_ref = self.resource_api.update_project(
tenant_id, clean_tenant)
- return {'tenant': tenant_ref}
+ return {'tenant': self.v3_to_v2_project(tenant_ref)}
@controller.v2_deprecated
def delete_project(self, context, tenant_id):
self.assert_admin(context)
+ self._assert_not_is_domain_project(tenant_id)
self.resource_api.delete_project(tenant_id)
@@ -201,9 +213,18 @@ class ProjectV3(controller.V3Controller):
def create_project(self, context, project):
ref = self._assign_unique_id(self._normalize_dict(project))
ref = self._normalize_domain_id(context, ref)
+
+ if ref.get('is_domain'):
+ msg = _('The creation of projects acting as domains is not '
+ 'allowed yet.')
+ raise exception.NotImplemented(msg)
+
initiator = notifications._get_request_audit_info(context)
- ref = self.resource_api.create_project(ref['id'], ref,
- initiator=initiator)
+ try:
+ ref = self.resource_api.create_project(ref['id'], ref,
+ initiator=initiator)
+ except exception.DomainNotFound as e:
+ raise exception.ValidationError(e)
return ProjectV3.wrap_member(context, ref)
@controller.filterprotected('domain_id', 'enabled', 'name',
diff --git a/keystone-moon/keystone/resource/core.py b/keystone-moon/keystone/resource/core.py
index 017eb4e7..ca69b729 100644
--- a/keystone-moon/keystone/resource/core.py
+++ b/keystone-moon/keystone/resource/core.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Main entry point into the resource service."""
+"""Main entry point into the Resource service."""
import abc
@@ -18,12 +18,11 @@ from oslo_config import cfg
from oslo_log import log
import six
-from keystone import clean
from keystone.common import cache
+from keystone.common import clean
from keystone.common import dependency
from keystone.common import driver_hints
from keystone.common import manager
-from keystone.contrib import federation
from keystone import exception
from keystone.i18n import _, _LE, _LW
from keystone import notifications
@@ -47,12 +46,15 @@ def calc_default_domain():
@dependency.requires('assignment_api', 'credential_api', 'domain_config_api',
'identity_api', 'revoke_api')
class Manager(manager.Manager):
- """Default pivot point for the resource backend.
+ """Default pivot point for the Resource backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
+
+ driver_namespace = 'keystone.resource'
+
_DOMAIN = 'domain'
_PROJECT = 'project'
@@ -62,9 +64,8 @@ class Manager(manager.Manager):
resource_driver = CONF.resource.driver
if resource_driver is None:
- assignment_driver = (
- dependency.get_provider('assignment_api').driver)
- resource_driver = assignment_driver.default_resource_driver()
+ assignment_manager = dependency.get_provider('assignment_api')
+ resource_driver = assignment_manager.default_resource_driver()
super(Manager, self).__init__(resource_driver)
@@ -86,21 +87,23 @@ class Manager(manager.Manager):
tenant['enabled'] = clean.project_enabled(tenant['enabled'])
tenant.setdefault('description', '')
tenant.setdefault('parent_id', None)
+ tenant.setdefault('is_domain', False)
+ self.get_domain(tenant.get('domain_id'))
if tenant.get('parent_id') is not None:
parent_ref = self.get_project(tenant.get('parent_id'))
parents_list = self.list_project_parents(parent_ref['id'])
parents_list.append(parent_ref)
for ref in parents_list:
if ref.get('domain_id') != tenant.get('domain_id'):
- raise exception.ForbiddenAction(
- action=_('cannot create a project within a different '
- 'domain than its parents.'))
+ raise exception.ValidationError(
+ message=_('cannot create a project within a different '
+ 'domain than its parents.'))
if not ref.get('enabled', True):
- raise exception.ForbiddenAction(
- action=_('cannot create a project in a '
- 'branch containing a disabled '
- 'project: %s') % ref['id'])
+ raise exception.ValidationError(
+ message=_('cannot create a project in a '
+ 'branch containing a disabled '
+ 'project: %s') % ref['id'])
self._assert_max_hierarchy_depth(tenant.get('parent_id'),
parents_list)
@@ -135,14 +138,13 @@ class Manager(manager.Manager):
"""
# NOTE(marek-denis): We cannot create this attribute in the __init__ as
# config values are always initialized to default value.
- federated_domain = (CONF.federation.federated_domain_name or
- federation.FEDERATED_DOMAIN_KEYWORD).lower()
+ federated_domain = CONF.federation.federated_domain_name.lower()
if (domain.get('name') and domain['name'].lower() == federated_domain):
raise AssertionError(_('Domain cannot be named %s')
- % federated_domain)
+ % domain['name'])
if (domain_id.lower() == federated_domain):
raise AssertionError(_('Domain cannot have ID %s')
- % federated_domain)
+ % domain_id)
def assert_project_enabled(self, project_id, project=None):
"""Assert the project is enabled and its associated domain is enabled.
@@ -177,7 +179,7 @@ class Manager(manager.Manager):
'disabled parents') % project_id)
def _assert_whole_subtree_is_disabled(self, project_id):
- subtree_list = self.driver.list_projects_in_subtree(project_id)
+ subtree_list = self.list_projects_in_subtree(project_id)
for ref in subtree_list:
if ref.get('enabled', True):
raise exception.ForbiddenAction(
@@ -194,6 +196,11 @@ class Manager(manager.Manager):
raise exception.ForbiddenAction(
action=_('Update of `parent_id` is not allowed.'))
+ if ('is_domain' in tenant and
+ tenant['is_domain'] != original_tenant['is_domain']):
+ raise exception.ValidationError(
+ message=_('Update of `is_domain` is not allowed.'))
+
if 'enabled' in tenant:
tenant['enabled'] = clean.project_enabled(tenant['enabled'])
@@ -241,15 +248,23 @@ class Manager(manager.Manager):
user_projects = self.assignment_api.list_projects_for_user(user_id)
user_projects_ids = set([proj['id'] for proj in user_projects])
# Keep only the projects present in user_projects
- projects_list = [proj for proj in projects_list
- if proj['id'] in user_projects_ids]
+ return [proj for proj in projects_list
+ if proj['id'] in user_projects_ids]
+
+ def _assert_valid_project_id(self, project_id):
+ if project_id is None:
+ msg = _('Project field is required and cannot be empty.')
+ raise exception.ValidationError(message=msg)
+ # Check if project_id exists
+ self.get_project(project_id)
def list_project_parents(self, project_id, user_id=None):
+ self._assert_valid_project_id(project_id)
parents = self.driver.list_project_parents(project_id)
# If a user_id was provided, the returned list should be filtered
# against the projects this user has access to.
if user_id:
- self._filter_projects_list(parents, user_id)
+ parents = self._filter_projects_list(parents, user_id)
return parents
def _build_parents_as_ids_dict(self, project, parents_by_id):
@@ -296,11 +311,12 @@ class Manager(manager.Manager):
return parents_as_ids
def list_projects_in_subtree(self, project_id, user_id=None):
+ self._assert_valid_project_id(project_id)
subtree = self.driver.list_projects_in_subtree(project_id)
# If a user_id was provided, the returned list should be filtered
# against the projects this user has access to.
if user_id:
- self._filter_projects_list(subtree, user_id)
+ subtree = self._filter_projects_list(subtree, user_id)
return subtree
def _build_subtree_as_ids_dict(self, project_id, subtree_by_parent):
@@ -780,6 +796,9 @@ class Driver(object):
raise exception.DomainNotFound(domain_id=domain_id)
+MEMOIZE_CONFIG = cache.get_memoization_decorator(section='domain_config')
+
+
@dependency.provider('domain_config_api')
class DomainConfigManager(manager.Manager):
"""Default pivot point for the Domain Config backend."""
@@ -793,6 +812,8 @@ class DomainConfigManager(manager.Manager):
# Only those options that affect the domain-specific driver support in
# the identity manager are supported.
+ driver_namespace = 'keystone.resource.domain_config'
+
whitelisted_options = {
'identity': ['driver'],
'ldap': [
@@ -975,6 +996,10 @@ class DomainConfigManager(manager.Manager):
self.create_config_option(
domain_id, option['group'], option['option'], option['value'],
sensitive=True)
+ # Since we are caching on the full substituted config, we just
+ # invalidate here, rather than try and create the right result to
+ # cache.
+ self.get_config_with_sensitive_info.invalidate(self, domain_id)
return self._list_to_config(whitelisted)
def get_config(self, domain_id, group=None, option=None):
@@ -999,7 +1024,7 @@ class DomainConfigManager(manager.Manager):
'url': 'myurl'
'user_tree_dn': 'OU=myou'},
'identity': {
- 'driver': 'keystone.identity.backends.ldap.Identity'}
+ 'driver': 'ldap'}
}
@@ -1077,22 +1102,22 @@ class DomainConfigManager(manager.Manager):
'provided contains group %(group_other)s '
'instead') % {
'group': group,
- 'group_other': config.keys()[0]}
+ 'group_other': list(config.keys())[0]}
raise exception.InvalidDomainConfig(reason=msg)
if option and option not in config[group]:
msg = _('Trying to update option %(option)s in group '
'%(group)s, but config provided contains option '
'%(option_other)s instead') % {
'group': group, 'option': option,
- 'option_other': config[group].keys()[0]}
+ 'option_other': list(config[group].keys())[0]}
raise exception.InvalidDomainConfig(reason=msg)
# Finally, we need to check if the group/option specified
# already exists in the original config - since if not, to keep
# with the semantics of an update, we need to fail with
# a DomainConfigNotFound
- if not self.get_config_with_sensitive_info(domain_id,
- group, option):
+ if not self._get_config_with_sensitive_info(domain_id,
+ group, option):
if option:
msg = _('option %(option)s in group %(group)s') % {
'group': group, 'option': option}
@@ -1131,6 +1156,7 @@ class DomainConfigManager(manager.Manager):
for new_option in sensitive:
_update_or_create(domain_id, new_option, sensitive=True)
+ self.get_config_with_sensitive_info.invalidate(self, domain_id)
return self.get_config(domain_id)
def delete_config(self, domain_id, group=None, option=None):
@@ -1154,7 +1180,7 @@ class DomainConfigManager(manager.Manager):
if group:
# As this is a partial delete, then make sure the items requested
# are valid and exist in the current config
- current_config = self.get_config_with_sensitive_info(domain_id)
+ current_config = self._get_config_with_sensitive_info(domain_id)
# Raise an exception if the group/options specified don't exist in
# the current config so that the delete method provides the
# correct error semantics.
@@ -1171,14 +1197,14 @@ class DomainConfigManager(manager.Manager):
self.delete_config_options(domain_id, group, option)
self.delete_config_options(domain_id, group, option, sensitive=True)
+ self.get_config_with_sensitive_info.invalidate(self, domain_id)
- def get_config_with_sensitive_info(self, domain_id, group=None,
- option=None):
- """Get config for a domain with sensitive info included.
+ def _get_config_with_sensitive_info(self, domain_id, group=None,
+ option=None):
+ """Get config for a domain/group/option with sensitive info included.
- This method is not exposed via the public API, but is used by the
- identity manager to initialize a domain with the fully formed config
- options.
+ This is only used by the methods within this class, which may need to
+ check individual groups or options.
"""
whitelisted = self.list_config_options(domain_id, group, option)
@@ -1233,6 +1259,17 @@ class DomainConfigManager(manager.Manager):
return self._list_to_config(whitelisted, sensitive)
+ @MEMOIZE_CONFIG
+ def get_config_with_sensitive_info(self, domain_id):
+ """Get config for a domain with sensitive info included.
+
+ This method is not exposed via the public API, but is used by the
+ identity manager to initialize a domain with the fully formed config
+ options.
+
+ """
+ return self._get_config_with_sensitive_info(domain_id)
+
@six.add_metaclass(abc.ABCMeta)
class DomainConfigDriver(object):
diff --git a/keystone-moon/keystone/resource/schema.py b/keystone-moon/keystone/resource/schema.py
index 0fd59e3f..e26a9c4a 100644
--- a/keystone-moon/keystone/resource/schema.py
+++ b/keystone-moon/keystone/resource/schema.py
@@ -21,6 +21,7 @@ _project_properties = {
# implementation.
'domain_id': parameter_types.id_string,
'enabled': parameter_types.boolean,
+ 'is_domain': parameter_types.boolean,
'parent_id': validation.nullable(parameter_types.id_string),
'name': {
'type': 'string',
diff --git a/keystone-moon/keystone/server/backends.py b/keystone-moon/keystone/server/backends.py
new file mode 100644
index 00000000..ebe00a81
--- /dev/null
+++ b/keystone-moon/keystone/server/backends.py
@@ -0,0 +1,64 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone import assignment
+from keystone import auth
+from keystone import catalog
+from keystone.common import cache
+from keystone.contrib import endpoint_filter
+from keystone.contrib import federation
+from keystone.contrib import oauth1
+from keystone.contrib import revoke
+from keystone import credential
+from keystone import endpoint_policy
+from keystone import identity
+from keystone import policy
+from keystone import resource
+from keystone import token
+from keystone import trust
+
+
+def load_backends():
+
+ # Configure and build the cache
+ cache.configure_cache_region(cache.REGION)
+
+ # Ensure that the identity driver is created before the assignment manager
+ # and that the assignment driver is created before the resource manager.
+ # The default resource driver depends on assignment, which in turn
+ # depends on identity - hence we need to ensure the chain is available.
+ _IDENTITY_API = identity.Manager()
+ _ASSIGNMENT_API = assignment.Manager()
+
+ DRIVERS = dict(
+ assignment_api=_ASSIGNMENT_API,
+ catalog_api=catalog.Manager(),
+ credential_api=credential.Manager(),
+ domain_config_api=resource.DomainConfigManager(),
+ endpoint_filter_api=endpoint_filter.Manager(),
+ endpoint_policy_api=endpoint_policy.Manager(),
+ federation_api=federation.Manager(),
+ id_generator_api=identity.generator.Manager(),
+ id_mapping_api=identity.MappingManager(),
+ identity_api=_IDENTITY_API,
+ oauth_api=oauth1.Manager(),
+ policy_api=policy.Manager(),
+ resource_api=resource.Manager(),
+ revoke_api=revoke.Manager(),
+ role_api=assignment.RoleManager(),
+ token_api=token.persistence.Manager(),
+ trust_api=trust.Manager(),
+ token_provider_api=token.provider.Manager())
+
+ auth.controllers.load_auth_methods()
+
+ return DRIVERS
diff --git a/keystone-moon/keystone/server/common.py b/keystone-moon/keystone/server/common.py
index fda44eea..2de6d39e 100644
--- a/keystone-moon/keystone/server/common.py
+++ b/keystone-moon/keystone/server/common.py
@@ -14,10 +14,10 @@
from oslo_config import cfg
-from keystone import backends
from keystone.common import dependency
from keystone.common import sql
from keystone import config
+from keystone.server import backends
CONF = cfg.CONF
diff --git a/keystone-moon/keystone/server/eventlet.py b/keystone-moon/keystone/server/eventlet.py
index 5bedaf9b..243f0234 100644
--- a/keystone-moon/keystone/server/eventlet.py
+++ b/keystone-moon/keystone/server/eventlet.py
@@ -20,6 +20,8 @@ import socket
from oslo_concurrency import processutils
from oslo_config import cfg
import oslo_i18n
+from oslo_service import service
+from oslo_service import systemd
import pbr.version
@@ -34,8 +36,6 @@ from keystone.common import environment
from keystone.common import utils
from keystone import config
from keystone.i18n import _
-from keystone.openstack.common import service
-from keystone.openstack.common import systemd
from keystone.server import common
from keystone import service as keystone_service
@@ -79,9 +79,9 @@ def serve(*servers):
'Support for keystone under eventlet will be removed in '
'the "M"-Release.'))
if max([server[1].workers for server in servers]) > 1:
- launcher = service.ProcessLauncher()
+ launcher = service.ProcessLauncher(CONF)
else:
- launcher = service.ServiceLauncher()
+ launcher = service.ServiceLauncher(CONF)
for name, server in servers:
try:
diff --git a/keystone-moon/keystone/server/wsgi.py b/keystone-moon/keystone/server/wsgi.py
index 863f13bc..dbdad326 100644
--- a/keystone-moon/keystone/server/wsgi.py
+++ b/keystone-moon/keystone/server/wsgi.py
@@ -50,3 +50,11 @@ def initialize_application(name):
_unused, application = common.setup_backends(
startup_application_fn=loadapp)
return application
+
+
+def initialize_admin_application():
+ return initialize_application('admin')
+
+
+def initialize_public_application():
+ return initialize_application('main')
diff --git a/keystone-moon/keystone/service.py b/keystone-moon/keystone/service.py
index e9a0748e..35b548fa 100644
--- a/keystone-moon/keystone/service.py
+++ b/keystone-moon/keystone/service.py
@@ -26,13 +26,14 @@ from keystone import catalog
from keystone.common import wsgi
from keystone import controllers
from keystone import credential
+from keystone import endpoint_policy
from keystone import identity
from keystone import policy
from keystone import resource
from keystone import routers
from keystone import token
from keystone import trust
-from keystone.contrib import moon as authz
+
CONF = cfg.CONF
LOG = log.getLogger(__name__)
@@ -103,11 +104,23 @@ def v3_app_factory(global_conf, **local_conf):
sub_routers = []
_routers = []
- router_modules = [assignment, auth, catalog, credential, identity, policy,
- resource, authz]
+ # NOTE(dstanek): Routers should be ordered by their frequency of use in
+ # a live system. This is due to the routes implementation. The most
+ # frequently used routers should appear first.
+ router_modules = [auth,
+ assignment,
+ catalog,
+ credential,
+ identity,
+ policy,
+ resource]
+
if CONF.trust.enabled:
router_modules.append(trust)
+ if CONF.endpoint_policy.enabled:
+ router_modules.append(endpoint_policy)
+
for module in router_modules:
routers_instance = module.routers.Routers()
_routers.append(routers_instance)
diff --git a/keystone-moon/keystone/tests/functional/__init__.py b/keystone-moon/keystone/tests/functional/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/functional/__init__.py
diff --git a/keystone-moon/keystone/tests/functional/shared/__init__.py b/keystone-moon/keystone/tests/functional/shared/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/functional/shared/__init__.py
diff --git a/keystone-moon/keystone/tests/functional/shared/test_running.py b/keystone-moon/keystone/tests/functional/shared/test_running.py
new file mode 100644
index 00000000..aed48ac2
--- /dev/null
+++ b/keystone-moon/keystone/tests/functional/shared/test_running.py
@@ -0,0 +1,50 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import requests
+import testtools.matchers
+
+
+is_multiple_choices = testtools.matchers.Equals(
+ requests.status_codes.codes.multiple_choices)
+is_ok = testtools.matchers.Equals(requests.status_codes.codes.ok)
+
+admin_url = 'http://localhost:35357'
+public_url = 'http://localhost:5000'
+versions = ('v2.0', 'v3')
+
+
+class TestServerRunning(testtools.TestCase):
+
+ def test_admin_responds_with_multiple_choices(self):
+ resp = requests.get(admin_url)
+ self.assertThat(resp.status_code, is_multiple_choices)
+
+ def test_admin_versions(self):
+ for version in versions:
+ resp = requests.get(admin_url + '/' + version)
+ self.assertThat(
+ resp.status_code,
+ testtools.matchers.Annotate(
+ 'failed for version %s' % version, is_ok))
+
+ def test_public_responds_with_multiple_choices(self):
+ resp = requests.get(public_url)
+ self.assertThat(resp.status_code, is_multiple_choices)
+
+ def test_public_versions(self):
+ for version in versions:
+ resp = requests.get(public_url + '/' + version)
+ self.assertThat(
+ resp.status_code,
+ testtools.matchers.Annotate(
+ 'failed for version %s' % version, is_ok))
diff --git a/keystone-moon/keystone/tests/hacking/__init__.py b/keystone-moon/keystone/tests/hacking/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/hacking/__init__.py
diff --git a/keystone-moon/keystone/tests/hacking/checks.py b/keystone-moon/keystone/tests/hacking/checks.py
new file mode 100644
index 00000000..17bafff3
--- /dev/null
+++ b/keystone-moon/keystone/tests/hacking/checks.py
@@ -0,0 +1,434 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Keystone's pep8 extensions.
+
+In order to make the review process faster and easier for core devs we are
+adding some Keystone specific pep8 checks. This will catch common errors
+so that core devs don't have to.
+
+There are two types of pep8 extensions. One is a function that takes either
+a physical or logical line. The physical or logical line is the first param
+in the function definition and can be followed by other parameters supported
+by pep8. The second type is a class that parses AST trees. For more info
+please see pep8.py.
+"""
+
+import ast
+import re
+
+import six
+
+
+class BaseASTChecker(ast.NodeVisitor):
+ """Provides a simple framework for writing AST-based checks.
+
+ Subclasses should implement visit_* methods like any other AST visitor
+ implementation. When they detect an error for a particular node the
+ method should call ``self.add_error(offending_node)``. Details about
+ where in the code the error occurred will be pulled from the node
+ object.
+
+ Subclasses should also provide a class variable named CHECK_DESC to
+ be used for the human readable error message.
+
+ """
+
+ def __init__(self, tree, filename):
+ """This object is created automatically by pep8.
+
+ :param tree: an AST tree
+ :param filename: name of the file being analyzed
+ (ignored by our checks)
+ """
+ self._tree = tree
+ self._errors = []
+
+ def run(self):
+ """Called automatically by pep8."""
+ self.visit(self._tree)
+ return self._errors
+
+ def add_error(self, node, message=None):
+ """Add an error caused by a node to the list of errors for pep8."""
+ message = message or self.CHECK_DESC
+ error = (node.lineno, node.col_offset, message, self.__class__)
+ self._errors.append(error)
+
+
+class CheckForMutableDefaultArgs(BaseASTChecker):
+ """Checks for the use of mutable objects as function/method defaults.
+
+ We are only checking for list and dict literals at this time. This means
+ that a developer could specify an instance of their own and cause a bug.
+ The fix for this is probably more work than it's worth because it will
+ get caught during code review.
+
+ """
+
+ CHECK_DESC = 'K001 Using mutable as a function/method default'
+ MUTABLES = (
+ ast.List, ast.ListComp,
+ ast.Dict, ast.DictComp,
+ ast.Set, ast.SetComp,
+ ast.Call)
+
+ def visit_FunctionDef(self, node):
+ for arg in node.args.defaults:
+ if isinstance(arg, self.MUTABLES):
+ self.add_error(arg)
+
+ super(CheckForMutableDefaultArgs, self).generic_visit(node)
+
+
+def block_comments_begin_with_a_space(physical_line, line_number):
+ """There should be a space after the # of block comments.
+
+ There is already a check in pep8 that enforces this rule for
+ inline comments.
+
+ Okay: # this is a comment
+ Okay: #!/usr/bin/python
+ Okay: # this is a comment
+ K002: #this is a comment
+
+ """
+ MESSAGE = "K002 block comments should start with '# '"
+
+ # shebangs are OK
+ if line_number == 1 and physical_line.startswith('#!'):
+ return
+
+ text = physical_line.strip()
+ if text.startswith('#'): # look for block comments
+ if len(text) > 1 and not text[1].isspace():
+ return physical_line.index('#'), MESSAGE
+
+
+class CheckForAssertingNoneEquality(BaseASTChecker):
+ """Ensures that code does not use a None with assert(Not*)Equal."""
+
+ CHECK_DESC_IS = ('K003 Use self.assertIsNone(...) when comparing '
+ 'against None')
+ CHECK_DESC_ISNOT = ('K004 Use assertIsNotNone(...) when comparing '
+ ' against None')
+
+ def visit_Call(self, node):
+ # NOTE(dstanek): I wrote this in a verbose way to make it easier to
+ # read for those that have little experience with Python's AST.
+
+ if isinstance(node.func, ast.Attribute):
+ if node.func.attr == 'assertEqual':
+ for arg in node.args:
+ if isinstance(arg, ast.Name) and arg.id == 'None':
+ self.add_error(node, message=self.CHECK_DESC_IS)
+ elif node.func.attr == 'assertNotEqual':
+ for arg in node.args:
+ if isinstance(arg, ast.Name) and arg.id == 'None':
+ self.add_error(node, message=self.CHECK_DESC_ISNOT)
+
+ super(CheckForAssertingNoneEquality, self).generic_visit(node)
+
+
+class CheckForLoggingIssues(BaseASTChecker):
+
+ DEBUG_CHECK_DESC = 'K005 Using translated string in debug logging'
+ NONDEBUG_CHECK_DESC = 'K006 Not using translating helper for logging'
+ EXCESS_HELPER_CHECK_DESC = 'K007 Using hints when _ is necessary'
+ LOG_MODULES = ('logging', 'oslo_log.log')
+ I18N_MODULES = (
+ 'keystone.i18n._',
+ 'keystone.i18n._LI',
+ 'keystone.i18n._LW',
+ 'keystone.i18n._LE',
+ 'keystone.i18n._LC',
+ )
+ TRANS_HELPER_MAP = {
+ 'debug': None,
+ 'info': '_LI',
+ 'warn': '_LW',
+ 'warning': '_LW',
+ 'error': '_LE',
+ 'exception': '_LE',
+ 'critical': '_LC',
+ }
+
+ def __init__(self, tree, filename):
+ super(CheckForLoggingIssues, self).__init__(tree, filename)
+
+ self.logger_names = []
+ self.logger_module_names = []
+ self.i18n_names = {}
+
+ # NOTE(dstanek): this kinda accounts for scopes when talking
+ # about only leaf node in the graph
+ self.assignments = {}
+
+ def generic_visit(self, node):
+ """Called if no explicit visitor function exists for a node."""
+ for field, value in ast.iter_fields(node):
+ if isinstance(value, list):
+ for item in value:
+ if isinstance(item, ast.AST):
+ item._parent = node
+ self.visit(item)
+ elif isinstance(value, ast.AST):
+ value._parent = node
+ self.visit(value)
+
+ def _filter_imports(self, module_name, alias):
+ """Keeps lists of logging and i18n imports
+
+ """
+ if module_name in self.LOG_MODULES:
+ self.logger_module_names.append(alias.asname or alias.name)
+ elif module_name in self.I18N_MODULES:
+ self.i18n_names[alias.asname or alias.name] = alias.name
+
+ def visit_Import(self, node):
+ for alias in node.names:
+ self._filter_imports(alias.name, alias)
+ return super(CheckForLoggingIssues, self).generic_visit(node)
+
+ def visit_ImportFrom(self, node):
+ for alias in node.names:
+ full_name = '%s.%s' % (node.module, alias.name)
+ self._filter_imports(full_name, alias)
+ return super(CheckForLoggingIssues, self).generic_visit(node)
+
+ def _find_name(self, node):
+ """Return the fully qualified name or a Name or Attribute."""
+ if isinstance(node, ast.Name):
+ return node.id
+ elif (isinstance(node, ast.Attribute)
+ and isinstance(node.value, (ast.Name, ast.Attribute))):
+ method_name = node.attr
+ obj_name = self._find_name(node.value)
+ if obj_name is None:
+ return None
+ return obj_name + '.' + method_name
+ elif isinstance(node, six.string_types):
+ return node
+ else: # could be Subscript, Call or many more
+ return None
+
+ def visit_Assign(self, node):
+ """Look for 'LOG = logging.getLogger'
+
+ This handles the simple case:
+ name = [logging_module].getLogger(...)
+
+ - or -
+
+ name = [i18n_name](...)
+
+ And some much more comple ones:
+ name = [i18n_name](...) % X
+
+ - or -
+
+ self.name = [i18n_name](...) % X
+
+ """
+ attr_node_types = (ast.Name, ast.Attribute)
+
+ if (len(node.targets) != 1
+ or not isinstance(node.targets[0], attr_node_types)):
+ # say no to: "x, y = ..."
+ return super(CheckForLoggingIssues, self).generic_visit(node)
+
+ target_name = self._find_name(node.targets[0])
+
+ if (isinstance(node.value, ast.BinOp) and
+ isinstance(node.value.op, ast.Mod)):
+ if (isinstance(node.value.left, ast.Call) and
+ isinstance(node.value.left.func, ast.Name) and
+ node.value.left.func.id in self.i18n_names):
+ # NOTE(dstanek): this is done to match cases like:
+ # `msg = _('something %s') % x`
+ node = ast.Assign(value=node.value.left)
+
+ if not isinstance(node.value, ast.Call):
+ # node.value must be a call to getLogger
+ self.assignments.pop(target_name, None)
+ return super(CheckForLoggingIssues, self).generic_visit(node)
+
+ # is this a call to an i18n function?
+ if (isinstance(node.value.func, ast.Name)
+ and node.value.func.id in self.i18n_names):
+ self.assignments[target_name] = node.value.func.id
+ return super(CheckForLoggingIssues, self).generic_visit(node)
+
+ if (not isinstance(node.value.func, ast.Attribute)
+ or not isinstance(node.value.func.value, attr_node_types)):
+ # function must be an attribute on an object like
+ # logging.getLogger
+ return super(CheckForLoggingIssues, self).generic_visit(node)
+
+ object_name = self._find_name(node.value.func.value)
+ func_name = node.value.func.attr
+
+ if (object_name in self.logger_module_names
+ and func_name == 'getLogger'):
+ self.logger_names.append(target_name)
+
+ return super(CheckForLoggingIssues, self).generic_visit(node)
+
+ def visit_Call(self, node):
+ """Look for the 'LOG.*' calls.
+
+ """
+
+ # obj.method
+ if isinstance(node.func, ast.Attribute):
+ obj_name = self._find_name(node.func.value)
+ if isinstance(node.func.value, ast.Name):
+ method_name = node.func.attr
+ elif isinstance(node.func.value, ast.Attribute):
+ obj_name = self._find_name(node.func.value)
+ method_name = node.func.attr
+ else: # could be Subscript, Call or many more
+ return super(CheckForLoggingIssues, self).generic_visit(node)
+
+        # must be a logger instance and one of the supported logging methods
+ if (obj_name not in self.logger_names
+ or method_name not in self.TRANS_HELPER_MAP):
+ return super(CheckForLoggingIssues, self).generic_visit(node)
+
+ # the call must have arguments
+ if not len(node.args):
+ return super(CheckForLoggingIssues, self).generic_visit(node)
+
+ if method_name == 'debug':
+ self._process_debug(node)
+ elif method_name in self.TRANS_HELPER_MAP:
+ self._process_non_debug(node, method_name)
+
+ return super(CheckForLoggingIssues, self).generic_visit(node)
+
+ def _process_debug(self, node):
+ msg = node.args[0] # first arg to a logging method is the msg
+
+        # if first arg is a call to an i18n name
+ if (isinstance(msg, ast.Call)
+ and isinstance(msg.func, ast.Name)
+ and msg.func.id in self.i18n_names):
+ self.add_error(msg, message=self.DEBUG_CHECK_DESC)
+
+        # if the first arg is a reference to an i18n call
+ elif (isinstance(msg, ast.Name)
+ and msg.id in self.assignments
+ and not self._is_raised_later(node, msg.id)):
+ self.add_error(msg, message=self.DEBUG_CHECK_DESC)
+
+ def _process_non_debug(self, node, method_name):
+ msg = node.args[0] # first arg to a logging method is the msg
+
+        # if first arg is a call to an i18n name
+ if isinstance(msg, ast.Call):
+ try:
+ func_name = msg.func.id
+ except AttributeError:
+ # in the case of logging only an exception, the msg function
+ # will not have an id associated with it, for instance:
+ # LOG.warning(six.text_type(e))
+ return
+
+ # the function name is the correct translation helper
+ # for the logging method
+ if func_name == self.TRANS_HELPER_MAP[method_name]:
+ return
+
+ # the function name is an alias for the correct translation
+            # helper for the logging method
+ if (self.i18n_names[func_name] ==
+ self.TRANS_HELPER_MAP[method_name]):
+ return
+
+ self.add_error(msg, message=self.NONDEBUG_CHECK_DESC)
+
+ # if the first arg is not a reference to the correct i18n hint
+ elif isinstance(msg, ast.Name):
+
+            # FIXME(dstanek): to make this more robust we should be checking
+ # all names passed into a logging method. we can't right now
+ # because:
+ # 1. We have code like this that we'll fix when dealing with the %:
+ # msg = _('....') % {}
+ # LOG.warn(msg)
+ # 2. We also do LOG.exception(e) in several places. I'm not sure
+ # exactly what we should be doing about that.
+ if msg.id not in self.assignments:
+ return
+
+ helper_method_name = self.TRANS_HELPER_MAP[method_name]
+ if (self.assignments[msg.id] != helper_method_name
+ and not self._is_raised_later(node, msg.id)):
+ self.add_error(msg, message=self.NONDEBUG_CHECK_DESC)
+ elif (self.assignments[msg.id] == helper_method_name
+ and self._is_raised_later(node, msg.id)):
+ self.add_error(msg, message=self.EXCESS_HELPER_CHECK_DESC)
+
+ def _is_raised_later(self, node, name):
+
+ def find_peers(node):
+ node_for_line = node._parent
+ for _field, value in ast.iter_fields(node._parent._parent):
+ if isinstance(value, list) and node_for_line in value:
+ return value[value.index(node_for_line) + 1:]
+ continue
+ return []
+
+ peers = find_peers(node)
+ for peer in peers:
+ if isinstance(peer, ast.Raise):
+ if (isinstance(peer.type, ast.Call) and
+ len(peer.type.args) > 0 and
+ isinstance(peer.type.args[0], ast.Name) and
+ name in (a.id for a in peer.type.args)):
+ return True
+ else:
+ return False
+ elif isinstance(peer, ast.Assign):
+ if name in (t.id for t in peer.targets):
+ return False
+
+
+def dict_constructor_with_sequence_copy(logical_line):
+ """Should use a dict comprehension instead of a dict constructor.
+
+ PEP-0274 introduced dict comprehension with performance enhancement
+ and it also makes code more readable.
+
+ Okay: lower_res = {k.lower(): v for k, v in six.iteritems(res[1])}
+ Okay: fool = dict(a='a', b='b')
+ K008: lower_res = dict((k.lower(), v) for k, v in six.iteritems(res[1]))
+ K008: attrs = dict([(k, _from_json(v))
+ K008: dict([[i,i] for i in range(3)])
+
+ """
+ MESSAGE = ("K008 Must use a dict comprehension instead of a dict"
+ " constructor with a sequence of key-value pairs.")
+
+ dict_constructor_with_sequence_re = (
+ re.compile(r".*\bdict\((\[)?(\(|\[)(?!\{)"))
+
+ if dict_constructor_with_sequence_re.match(logical_line):
+ yield (0, MESSAGE)
+
+
+def factory(register):
+ register(CheckForMutableDefaultArgs)
+ register(block_comments_begin_with_a_space)
+ register(CheckForAssertingNoneEquality)
+ register(CheckForLoggingIssues)
+ register(dict_constructor_with_sequence_copy)
diff --git a/keystone-moon/keystone/tests/moon/unit/__init__.py b/keystone-moon/keystone/tests/moon/unit/__init__.py
index 0cd835ce..54c9252e 100644
--- a/keystone-moon/keystone/tests/moon/unit/__init__.py
+++ b/keystone-moon/keystone/tests/moon/unit/__init__.py
@@ -3,7 +3,6 @@
# license which can be found in the file 'LICENSE' in this package distribution
# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
import uuid
-from keystone.contrib.moon.core import ADMIN_ID
USER = {
'name': 'admin',
@@ -25,10 +24,8 @@ def create_intra_extension(self, policy_model="policy_authz"):
if "authz" in policy_model:
genre = "authz"
IE["genre"] = genre
- # force re-initialization of the ADMIN_ID variable
- from keystone.contrib.moon.core import ADMIN_ID
- self.ADMIN_ID = ADMIN_ID
- ref = self.admin_api.load_intra_extension_dict(self.ADMIN_ID, intra_extension_dict=IE)
+ ref = self.admin_api.load_intra_extension_dict(self.root_api.get_root_admin_id(),
+ intra_extension_dict=IE)
self.assertIsInstance(ref, dict)
return ref
@@ -62,7 +59,6 @@ def create_user(self, username="TestAdminIntraExtensionManagerUser"):
def create_mapping(self, tenant_name=None, authz_id=None, admin_id=None):
- from keystone.contrib.moon.core import ADMIN_ID
if not tenant_name:
tenant_name = uuid.uuid4().hex
@@ -76,7 +72,7 @@ def create_mapping(self, tenant_name=None, authz_id=None, admin_id=None):
"domain_id": "default"
}
keystone_tenant = self.resource_api.create_project(tenant["id"], tenant)
- mapping = self.tenant_api.add_tenant_dict(ADMIN_ID, tenant)
+ mapping = self.tenant_api.add_tenant_dict(self.root_api.get_root_admin_id(), tenant)
self.assertIsInstance(mapping, dict)
self.assertIn("intra_authz_extension_id", mapping[tenant["id"]])
self.assertIn("intra_admin_extension_id", mapping[tenant["id"]])
diff --git a/keystone-moon/keystone/tests/moon/unit/test_unit_core_configuration.py b/keystone-moon/keystone/tests/moon/unit/test_unit_core_configuration.py
index 1d612b7d..83606ff3 100644
--- a/keystone-moon/keystone/tests/moon/unit/test_unit_core_configuration.py
+++ b/keystone-moon/keystone/tests/moon/unit/test_unit_core_configuration.py
@@ -12,34 +12,38 @@ from keystone.contrib.moon.core import ConfigurationManager
from keystone.tests.unit.ksfixtures import database
from keystone.contrib.moon.exception import *
from keystone.tests.unit import default_fixtures
-from keystone.contrib.moon.core import ADMIN_ID
from keystone.contrib.moon.core import LogManager
from keystone.contrib.moon.core import IntraExtensionAdminManager
+from keystone.contrib.moon.core import IntraExtensionRootManager
from keystone.tests.moon.unit import *
CONF = cfg.CONF
-@dependency.requires('admin_api', 'authz_api', 'tenant_api', 'configuration_api', 'moonlog_api')
+# @dependency.requires('admin_api', 'authz_api', 'tenant_api', 'configuration_api', 'moonlog_api')
class TestConfigurationManager(tests.TestCase):
def setUp(self):
self.useFixture(database.Database())
super(TestConfigurationManager, self).setUp()
- self.load_backends()
self.load_fixtures(default_fixtures)
+ self.load_backends()
+ domain = {'id': "default", 'name': "default"}
+ self.resource_api.create_domain(domain['id'], domain)
self.admin = create_user(self, username="admin")
self.demo = create_user(self, username="demo")
- self.root_intra_extension = create_intra_extension(self, policy_model="policy_root")
- # force re-initialization of the ADMIN_ID variable
- from keystone.contrib.moon.core import ADMIN_ID
- self.ADMIN_ID = ADMIN_ID
- self.manager = self.configuration_api
+ self.root_intra_extension = self.root_api.get_root_extension_dict()
+ self.root_intra_extension_id = self.root_intra_extension.keys()[0]
+ self.ADMIN_ID = self.root_api.get_root_admin_id()
+ self.authz_manager = self.authz_api
+ self.admin_manager = self.admin_api
+ self.configuration_manager = self.configuration_api
def load_extra_backends(self):
return {
"moonlog_api": LogManager(),
- "admin_api": IntraExtensionAdminManager()
+ "admin_api": IntraExtensionAdminManager(),
+ "root_api": IntraExtensionRootManager()
}
def config_overrides(self):
@@ -60,10 +64,9 @@ class TestConfigurationManager(tests.TestCase):
policy_directory=self.policy_directory)
def test_get_policy_template_dict(self):
- data = self.manager.get_policy_templates_dict(self.ADMIN_ID)
+ data = self.configuration_manager.get_policy_templates_dict(self.ADMIN_ID)
self.assertIsInstance(data, dict)
- self.assertIn("authz_templates", data)
- self.assertIn("policy_root", data["authz_templates"])
+ self.assertIn("policy_root", data)
# def test_get_aggregation_algorithm_dict(self):
# admin_intra_extension = create_intra_extension(self, policy_model="policy_admin")
diff --git a/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_admin.py b/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_admin.py
index 60122b9d..e76173e7 100644
--- a/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_admin.py
+++ b/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_admin.py
@@ -16,7 +16,6 @@ from keystone import resource
from keystone.contrib.moon.exception import *
from keystone.tests.unit import default_fixtures
from keystone.contrib.moon.core import LogManager, TenantManager
-from keystone.contrib.moon.core import ADMIN_ID
from keystone.tests.moon.unit import *
CONF = cfg.CONF
@@ -33,6 +32,7 @@ IE = {
"description": "a simple description."
}
+
@dependency.requires('admin_api', 'authz_api', 'tenant_api', 'configuration_api', 'moonlog_api')
class TestIntraExtensionAdminManagerOK(tests.TestCase):
@@ -40,15 +40,16 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
def setUp(self):
self.useFixture(database.Database())
super(TestIntraExtensionAdminManagerOK, self).setUp()
- self.load_backends()
self.load_fixtures(default_fixtures)
+ self.load_backends()
+ domain = {'id': "default", 'name': "default"}
+ self.resource_api.create_domain(domain['id'], domain)
self.admin = create_user(self, username="admin")
self.demo = create_user(self, username="demo")
- self.root_intra_extension = create_intra_extension(self, policy_model="policy_root")
- # force re-initialization of the ADMIN_ID variable
- from keystone.contrib.moon.core import ADMIN_ID
- self.ADMIN_ID = ADMIN_ID
- self.manager = self.authz_api
+ self.root_intra_extension = self.root_api.get_root_extension_dict()
+ self.root_intra_extension_id = self.root_intra_extension.keys()[0]
+ self.ADMIN_ID = self.root_api.get_root_admin_id()
+ self.authz_manager = self.authz_api
self.admin_manager = self.admin_api
def __get_key_from_value(self, value, values_dict):
@@ -74,7 +75,7 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
policy_directory=self.policy_directory)
def delete_admin_intra_extension(self):
- self.manager.del_intra_extension(self.ref["id"])
+ self.authz_manager.del_intra_extension(self.ref["id"])
def test_subjects(self):
authz_ie_dict = create_intra_extension(self, "policy_authz")
@@ -82,12 +83,10 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- subjects = self.manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ subjects = self.authz_manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(subjects, dict)
for key, value in subjects.iteritems():
self.assertIsInstance(value, dict)
@@ -112,7 +111,7 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
# Delete the new subject
self.admin_manager.del_subject(admin_subject_id, authz_ie_dict["id"], new_subject["id"])
- subjects = self.manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
+ subjects = self.authz_manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in subjects.iteritems():
self.assertIsInstance(value, dict)
self.assertIn("name", value)
@@ -125,12 +124,10 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- objects = self.manager.get_objects_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ objects = self.authz_manager.get_objects_dict(admin_subject_id, authz_ie_dict["id"])
objects_id_list = []
self.assertIsInstance(objects, dict)
for key, value in objects.iteritems():
@@ -145,12 +142,10 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- actions = self.manager.get_actions_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ actions = self.authz_manager.get_actions_dict(admin_subject_id, authz_ie_dict["id"])
actions_id_list = []
self.assertIsInstance(actions, dict)
for key, value in actions.iteritems():
@@ -165,12 +160,10 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- subject_categories = self.manager.get_subject_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ subject_categories = self.authz_manager.get_subject_categories_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(subject_categories, dict)
for key, value in subject_categories.iteritems():
self.assertIsInstance(value, dict)
@@ -192,7 +185,7 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
# Delete the new subject_category
self.admin_manager.del_subject_category(admin_subject_id, authz_ie_dict["id"], new_subject_category["id"])
- subject_categories = self.manager.get_subject_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ subject_categories = self.authz_manager.get_subject_categories_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in subject_categories.iteritems():
self.assertIsInstance(value, dict)
self.assertIn("name", value)
@@ -205,12 +198,10 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- object_categories = self.manager.get_object_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ object_categories = self.authz_manager.get_object_categories_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(object_categories, dict)
for key, value in object_categories.iteritems():
self.assertIsInstance(value, dict)
@@ -233,7 +224,7 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
# Delete the new object_category
self.admin_manager.del_object_category(admin_subject_id, authz_ie_dict["id"], new_object_category["id"])
- object_categories = self.manager.get_object_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ object_categories = self.authz_manager.get_object_categories_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in object_categories.iteritems():
self.assertIsInstance(value, dict)
self.assertIn("name", value)
@@ -246,12 +237,10 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- action_categories = self.manager.get_action_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ action_categories = self.authz_manager.get_action_categories_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(action_categories, dict)
for key, value in action_categories.iteritems():
self.assertIsInstance(value, dict)
@@ -274,7 +263,7 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
# Delete the new action_category
self.admin_manager.del_action_category(admin_subject_id, authz_ie_dict["id"], new_action_category["id"])
- action_categories = self.manager.get_action_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ action_categories = self.authz_manager.get_action_categories_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in action_categories.iteritems():
self.assertIsInstance(value, dict)
self.assertIn("name", value)
@@ -287,11 +276,11 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
subject_categories = self.admin_manager.add_subject_category_dict(
admin_subject_id,
@@ -304,7 +293,7 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
for subject_category_id in subject_categories:
- subject_category_scope = self.manager.get_subject_scopes_dict(
+ subject_category_scope = self.authz_manager.get_subject_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
subject_category_id)
@@ -350,11 +339,11 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
object_categories = self.admin_manager.add_object_category_dict(
admin_subject_id,
@@ -367,7 +356,7 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
for object_category_id in object_categories:
- object_category_scope = self.manager.get_object_scopes_dict(
+ object_category_scope = self.authz_manager.get_object_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
object_category_id)
@@ -413,11 +402,11 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
action_categories = self.admin_manager.add_action_category_dict(
admin_subject_id,
@@ -430,7 +419,7 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
for action_category_id in action_categories:
- action_category_scope = self.manager.get_action_scopes_dict(
+ action_category_scope = self.authz_manager.get_action_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
action_category_id)
@@ -476,17 +465,17 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
admin_authz_subject_id, admin_authz_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], authz_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], authz_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
demo_authz_subject_id, demo_authz_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], authz_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], authz_ie_dict['id'], 'demo').iteritems().next()
- subjects_dict = self.manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
+ subjects_dict = self.authz_manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
subject_categories = self.admin_manager.add_subject_category_dict(
admin_subject_id,
@@ -498,7 +487,7 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
)
for subject_category_id in subject_categories:
- subject_category_scope = self.manager.get_subject_scopes_dict(
+ subject_category_scope = self.authz_manager.get_subject_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
subject_category_id)
@@ -529,7 +518,7 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
new_subject_category_scope_2)
subject_category_scope_2_id = subject_category_scope_2.keys()[0]
- subject_category_assignments = self.manager.get_subject_assignment_list(
+ subject_category_assignments = self.authz_manager.get_subject_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
admin_authz_subject_id,
@@ -538,7 +527,7 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
self.assertIsInstance(subject_category_assignments, list)
self.assertEqual([], subject_category_assignments)
- subject_category_assignments = self.manager.get_subject_assignment_list(
+ subject_category_assignments = self.authz_manager.get_subject_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
demo_authz_subject_id,
@@ -599,13 +588,13 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- objects_dict = self.manager.get_objects_dict(admin_subject_id, authz_ie_dict["id"])
+ objects_dict = self.authz_manager.get_objects_dict(admin_subject_id, authz_ie_dict["id"])
object_vm1_id = None
object_vm2_id = None
@@ -627,7 +616,7 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
)
for object_category_id in object_categories:
- object_category_scope = self.manager.get_object_scopes_dict(
+ object_category_scope = self.authz_manager.get_object_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
object_category_id)
@@ -658,7 +647,7 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
new_object_category_scope_2)
object_category_scope_2_id = object_category_scope_2.keys()[0]
- object_category_assignments = self.manager.get_object_assignment_list(
+ object_category_assignments = self.authz_manager.get_object_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
object_vm1_id,
@@ -667,7 +656,7 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
self.assertIsInstance(object_category_assignments, list)
self.assertEqual([], object_category_assignments)
- object_category_assignments = self.manager.get_object_assignment_list(
+ object_category_assignments = self.authz_manager.get_object_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
object_vm2_id,
@@ -728,13 +717,13 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- actions_dict = self.manager.get_actions_dict(admin_subject_id, authz_ie_dict["id"])
+ actions_dict = self.authz_manager.get_actions_dict(admin_subject_id, authz_ie_dict["id"])
action_upload_id = None
action_list_id = None
@@ -756,7 +745,7 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
)
for action_category_id in action_categories:
- action_category_scope = self.manager.get_action_scopes_dict(
+ action_category_scope = self.authz_manager.get_action_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
action_category_id)
@@ -787,7 +776,7 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
new_action_category_scope_2)
action_category_scope_2_id = action_category_scope_2.keys()[0]
- action_category_assignments = self.manager.get_action_assignment_list(
+ action_category_assignments = self.authz_manager.get_action_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
action_upload_id,
@@ -796,7 +785,7 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
self.assertIsInstance(action_category_assignments, list)
self.assertEqual([], action_category_assignments)
- action_category_assignments = self.manager.get_action_assignment_list(
+ action_category_assignments = self.authz_manager.get_action_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
action_list_id,
@@ -857,11 +846,11 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.admin_manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.admin_manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
aggregation_algorithms = self.admin_manager.get_aggregation_algorithm_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in aggregation_algorithms.iteritems():
@@ -899,11 +888,11 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.admin_manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.admin_manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
sub_meta_rules = self.admin_manager.get_sub_meta_rules_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(sub_meta_rules, dict)
@@ -978,15 +967,16 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
def setUp(self):
self.useFixture(database.Database())
super(TestIntraExtensionAdminManagerKO, self).setUp()
- self.load_backends()
self.load_fixtures(default_fixtures)
+ self.load_backends()
+ domain = {'id': "default", 'name': "default"}
+ self.resource_api.create_domain(domain['id'], domain)
self.admin = create_user(self, username="admin")
self.demo = create_user(self, username="demo")
- self.root_intra_extension = create_intra_extension(self, policy_model="policy_root")
- # force re-initialization of the ADMIN_ID variable
- from keystone.contrib.moon.core import ADMIN_ID
- self.ADMIN_ID = ADMIN_ID
- self.manager = self.authz_api
+ self.root_intra_extension = self.root_api.get_root_extension_dict()
+ self.root_intra_extension_id = self.root_intra_extension.keys()[0]
+ self.ADMIN_ID = self.root_api.get_root_admin_id()
+ self.authz_manager = self.authz_api
self.admin_manager = self.admin_api
def __get_key_from_value(self, value, values_dict):
@@ -1017,12 +1007,12 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- subjects = self.manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ subjects = self.authz_manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(subjects, dict)
for key, value in subjects.iteritems():
self.assertIsInstance(value, dict)
@@ -1035,7 +1025,7 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
new_subject = {"name": "subject_test", "description": "subject_test"}
self.assertRaises(
AuthzException,
- self.manager.add_subject_dict,
+ self.authz_manager.add_subject_dict,
demo_subject_id, admin_ie_dict["id"], new_subject)
subjects = self.admin_manager.add_subject_dict(admin_subject_id, authz_ie_dict["id"], new_subject)
@@ -1052,11 +1042,11 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
# Delete the new subject
self.assertRaises(
AuthzException,
- self.manager.del_subject,
+ self.authz_manager.del_subject,
demo_subject_id, authz_ie_dict["id"], new_subject["id"])
self.admin_manager.del_subject(admin_subject_id, authz_ie_dict["id"], new_subject["id"])
- subjects = self.manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
+ subjects = self.authz_manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in subjects.iteritems():
self.assertIsInstance(value, dict)
self.assertIn("name", value)
@@ -1069,12 +1059,12 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- objects = self.manager.get_objects_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ objects = self.authz_manager.get_objects_dict(admin_subject_id, authz_ie_dict["id"])
objects_id_list = []
self.assertIsInstance(objects, dict)
for key, value in objects.iteritems():
@@ -1087,35 +1077,35 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
new_object = {"name": "object_test", "description": "object_test"}
self.assertRaises(
AuthzException,
- self.manager.add_object_dict,
+ self.authz_manager.add_object_dict,
demo_subject_id, admin_ie_dict["id"], new_object)
self.assertRaises(
ObjectsWriteNoAuthorized,
self.admin_manager.add_object_dict,
- admin_subject_id, authz_ie_dict["id"], new_object
+ admin_subject_id, admin_ie_dict["id"], new_object
)
# Delete the new object
for key in objects_id_list:
self.assertRaises(
AuthzException,
- self.manager.del_object,
+ self.authz_manager.del_object,
demo_subject_id, authz_ie_dict["id"], key)
self.assertRaises(
AuthzException,
- self.manager.del_object,
+ self.authz_manager.del_object,
admin_subject_id, authz_ie_dict["id"], key)
for key in objects_id_list:
self.assertRaises(
ObjectsWriteNoAuthorized,
self.admin_manager.del_object,
- demo_subject_id, authz_ie_dict["id"], key)
+ demo_subject_id, admin_ie_dict["id"], key)
self.assertRaises(
ObjectsWriteNoAuthorized,
self.admin_manager.del_object,
- admin_subject_id, authz_ie_dict["id"], key)
+ admin_subject_id, admin_ie_dict["id"], key)
def test_actions(self):
authz_ie_dict = create_intra_extension(self, "policy_authz")
@@ -1123,12 +1113,12 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- actions = self.manager.get_actions_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ actions = self.authz_manager.get_actions_dict(admin_subject_id, authz_ie_dict["id"])
actions_id_list = []
self.assertIsInstance(actions, dict)
for key, value in actions.iteritems():
@@ -1141,35 +1131,35 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
new_action = {"name": "action_test", "description": "action_test"}
self.assertRaises(
AuthzException,
- self.manager.add_action_dict,
+ self.authz_manager.add_action_dict,
demo_subject_id, admin_ie_dict["id"], new_action)
self.assertRaises(
ActionsWriteNoAuthorized,
self.admin_manager.add_action_dict,
- admin_subject_id, authz_ie_dict["id"], new_action
+ admin_subject_id, admin_ie_dict["id"], new_action
)
# Delete all actions
for key in actions_id_list:
self.assertRaises(
AuthzException,
- self.manager.del_action,
+ self.authz_manager.del_action,
demo_subject_id, authz_ie_dict["id"], key)
self.assertRaises(
AuthzException,
- self.manager.del_action,
+ self.authz_manager.del_action,
admin_subject_id, authz_ie_dict["id"], key)
for key in actions_id_list:
self.assertRaises(
ActionsWriteNoAuthorized,
self.admin_manager.del_action,
- demo_subject_id, authz_ie_dict["id"], key)
+ demo_subject_id, admin_ie_dict["id"], key)
self.assertRaises(
ActionsWriteNoAuthorized,
self.admin_manager.del_action,
- admin_subject_id, authz_ie_dict["id"], key)
+ admin_subject_id, admin_ie_dict["id"], key)
def test_subject_categories(self):
authz_ie_dict = create_intra_extension(self, "policy_authz")
@@ -1177,12 +1167,12 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- subject_categories = self.manager.get_subject_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ subject_categories = self.authz_manager.get_subject_categories_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(subject_categories, dict)
for key, value in subject_categories.iteritems():
self.assertIsInstance(value, dict)
@@ -1192,7 +1182,7 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
new_subject_category = {"name": "subject_category_test", "description": "subject_category_test"}
self.assertRaises(
AuthzException,
- self.manager.add_subject_category_dict,
+ self.authz_manager.add_subject_category_dict,
demo_subject_id, admin_ie_dict["id"], new_subject_category)
subject_categories = self.admin_manager.add_subject_category_dict(admin_subject_id, authz_ie_dict["id"], new_subject_category)
@@ -1209,11 +1199,11 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
# Delete the new subject_category
self.assertRaises(
AuthzException,
- self.manager.del_subject_category,
+ self.authz_manager.del_subject_category,
demo_subject_id, authz_ie_dict["id"], new_subject_category["id"])
self.admin_manager.del_subject_category(admin_subject_id, authz_ie_dict["id"], new_subject_category["id"])
- subject_categories = self.manager.get_subject_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ subject_categories = self.authz_manager.get_subject_categories_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in subject_categories.iteritems():
self.assertIsInstance(value, dict)
self.assertIn("name", value)
@@ -1226,12 +1216,12 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- object_categories = self.manager.get_object_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ object_categories = self.authz_manager.get_object_categories_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(object_categories, dict)
for key, value in object_categories.iteritems():
self.assertIsInstance(value, dict)
@@ -1241,7 +1231,7 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
new_object_category = {"name": "object_category_test", "description": "object_category_test"}
self.assertRaises(
AuthzException,
- self.manager.add_object_category_dict,
+ self.authz_manager.add_object_category_dict,
demo_subject_id, admin_ie_dict["id"], new_object_category)
object_categories = self.admin_manager.add_object_category_dict(admin_subject_id, authz_ie_dict["id"], new_object_category)
@@ -1258,11 +1248,11 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
# Delete the new object_category
self.assertRaises(
AuthzException,
- self.manager.del_object_category,
+ self.authz_manager.del_object_category,
demo_subject_id, authz_ie_dict["id"], new_object_category["id"])
self.admin_manager.del_object_category(admin_subject_id, authz_ie_dict["id"], new_object_category["id"])
- object_categories = self.manager.get_object_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ object_categories = self.authz_manager.get_object_categories_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in object_categories.iteritems():
self.assertIsInstance(value, dict)
self.assertIn("name", value)
@@ -1275,12 +1265,12 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- action_categories = self.manager.get_action_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ action_categories = self.authz_manager.get_action_categories_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(action_categories, dict)
for key, value in action_categories.iteritems():
self.assertIsInstance(value, dict)
@@ -1290,7 +1280,7 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
new_action_category = {"name": "action_category_test", "description": "action_category_test"}
self.assertRaises(
AuthzException,
- self.manager.add_action_category_dict,
+ self.authz_manager.add_action_category_dict,
demo_subject_id, admin_ie_dict["id"], new_action_category)
action_categories = self.admin_manager.add_action_category_dict(admin_subject_id, authz_ie_dict["id"], new_action_category)
@@ -1307,11 +1297,11 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
# Delete the new action_category
self.assertRaises(
AuthzException,
- self.manager.del_action_category,
+ self.authz_manager.del_action_category,
demo_subject_id, authz_ie_dict["id"], new_action_category["id"])
self.admin_manager.del_action_category(admin_subject_id, authz_ie_dict["id"], new_action_category["id"])
- action_categories = self.manager.get_action_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ action_categories = self.authz_manager.get_action_categories_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in action_categories.iteritems():
self.assertIsInstance(value, dict)
self.assertIn("name", value)
@@ -1324,11 +1314,11 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
subject_categories = self.admin_manager.add_subject_category_dict(
admin_subject_id,
@@ -1341,7 +1331,7 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
for subject_category_id in subject_categories:
- subject_category_scope = self.manager.get_subject_scopes_dict(
+ subject_category_scope = self.authz_manager.get_subject_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
subject_category_id)
@@ -1396,11 +1386,11 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
object_categories = self.admin_manager.add_object_category_dict(
admin_subject_id,
@@ -1413,7 +1403,7 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
for object_category_id in object_categories:
- object_category_scope = self.manager.get_object_scopes_dict(
+ object_category_scope = self.authz_manager.get_object_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
object_category_id)
@@ -1468,11 +1458,11 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
action_categories = self.admin_manager.add_action_category_dict(
admin_subject_id,
@@ -1485,7 +1475,7 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
for action_category_id in action_categories:
- action_category_scope = self.manager.get_action_scopes_dict(
+ action_category_scope = self.authz_manager.get_action_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
action_category_id)
@@ -1540,17 +1530,17 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
admin_authz_subject_id, admin_authz_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], authz_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], authz_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
demo_authz_subject_id, demo_authz_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], authz_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], authz_ie_dict['id'], 'demo').iteritems().next()
- subjects_dict = self.manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
+ subjects_dict = self.authz_manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
subject_categories = self.admin_manager.add_subject_category_dict(
admin_subject_id,
@@ -1562,7 +1552,7 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
)
for subject_category_id in subject_categories:
- subject_category_scope = self.manager.get_subject_scopes_dict(
+ subject_category_scope = self.authz_manager.get_subject_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
subject_category_id)
@@ -1593,7 +1583,7 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
new_subject_category_scope_2)
subject_category_scope_2_id = subject_category_scope_2.keys()[0]
- subject_category_assignments = self.manager.get_subject_assignment_list(
+ subject_category_assignments = self.authz_manager.get_subject_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
admin_authz_subject_id,
@@ -1602,7 +1592,7 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
self.assertIsInstance(subject_category_assignments, list)
self.assertEqual([], subject_category_assignments)
- subject_category_assignments = self.manager.get_subject_assignment_list(
+ subject_category_assignments = self.authz_manager.get_subject_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
demo_authz_subject_id,
@@ -1613,14 +1603,14 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
self.assertRaises(
AuthzException,
- self.manager.add_subject_assignment_list,
+ self.authz_manager.add_subject_assignment_list,
demo_subject_id, authz_ie_dict["id"],
admin_authz_subject_id, subject_category_id, subject_category_scope_1_id
)
self.assertRaises(
AuthzException,
- self.manager.add_subject_assignment_list,
+ self.authz_manager.add_subject_assignment_list,
demo_subject_id, authz_ie_dict["id"],
demo_authz_subject_id, subject_category_id, subject_category_scope_2_id
)
@@ -1692,13 +1682,13 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- objects_dict = self.manager.get_objects_dict(admin_subject_id, authz_ie_dict["id"])
+ objects_dict = self.authz_manager.get_objects_dict(admin_subject_id, authz_ie_dict["id"])
object_vm1_id = None
object_vm2_id = None
@@ -1720,7 +1710,7 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
)
for object_category_id in object_categories:
- object_category_scope = self.manager.get_object_scopes_dict(
+ object_category_scope = self.authz_manager.get_object_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
object_category_id)
@@ -1751,7 +1741,7 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
new_object_category_scope_2)
object_category_scope_2_id = object_category_scope_2.keys()[0]
- object_category_assignments = self.manager.get_object_assignment_list(
+ object_category_assignments = self.authz_manager.get_object_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
object_vm1_id,
@@ -1760,7 +1750,7 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
self.assertIsInstance(object_category_assignments, list)
self.assertEqual([], object_category_assignments)
- object_category_assignments = self.manager.get_object_assignment_list(
+ object_category_assignments = self.authz_manager.get_object_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
object_vm2_id,
@@ -1771,14 +1761,14 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
self.assertRaises(
AuthzException,
- self.manager.add_object_assignment_list,
+ self.authz_manager.add_object_assignment_list,
demo_subject_id, authz_ie_dict["id"],
object_vm1_id, object_category_id, object_category_scope_1_id
)
self.assertRaises(
AuthzException,
- self.manager.add_object_assignment_list,
+ self.authz_manager.add_object_assignment_list,
demo_subject_id, authz_ie_dict["id"],
object_vm2_id, object_category_id, object_category_scope_2_id
)
@@ -1850,13 +1840,13 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- actions_dict = self.manager.get_actions_dict(admin_subject_id, authz_ie_dict["id"])
+ actions_dict = self.authz_manager.get_actions_dict(admin_subject_id, authz_ie_dict["id"])
action_upload_id = None
action_list_id = None
@@ -1878,7 +1868,7 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
)
for action_category_id in action_categories:
- action_category_scope = self.manager.get_action_scopes_dict(
+ action_category_scope = self.authz_manager.get_action_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
action_category_id)
@@ -1909,7 +1899,7 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
new_action_category_scope_2)
action_category_scope_2_id = action_category_scope_2.keys()[0]
- action_category_assignments = self.manager.get_action_assignment_list(
+ action_category_assignments = self.authz_manager.get_action_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
action_upload_id,
@@ -1918,7 +1908,7 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
self.assertIsInstance(action_category_assignments, list)
self.assertEqual([], action_category_assignments)
- action_category_assignments = self.manager.get_action_assignment_list(
+ action_category_assignments = self.authz_manager.get_action_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
action_list_id,
@@ -1929,14 +1919,14 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
self.assertRaises(
AuthzException,
- self.manager.add_action_assignment_list,
+ self.authz_manager.add_action_assignment_list,
demo_subject_id, authz_ie_dict["id"],
action_upload_id, action_category_id, action_category_scope_1_id
)
self.assertRaises(
AuthzException,
- self.manager.add_action_assignment_list,
+ self.authz_manager.add_action_assignment_list,
demo_subject_id, authz_ie_dict["id"],
action_list_id, action_category_id, action_category_scope_2_id
)
@@ -2008,11 +1998,11 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.admin_manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.admin_manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
aggregation_algorithms = self.admin_manager.get_aggregation_algorithm_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in aggregation_algorithms.iteritems():
@@ -2050,11 +2040,11 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.admin_manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ # demo_subject_dict = self.admin_manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
+ # {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
sub_meta_rules = self.admin_manager.get_sub_meta_rules_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(sub_meta_rules, dict)
diff --git a/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_authz.py b/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_authz.py
index 2f75acaf..c96c00b5 100644
--- a/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_authz.py
+++ b/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_authz.py
@@ -10,12 +10,12 @@ import os
import uuid
from oslo_config import cfg
from keystone.tests import unit as tests
-from keystone.contrib.moon.core import IntraExtensionAdminManager, IntraExtensionAuthzManager
+from keystone.contrib.moon.core import IntraExtensionAdminManager, IntraExtensionAuthzManager, IntraExtensionRootManager
from keystone.tests.unit.ksfixtures import database
from keystone import resource
from keystone.contrib.moon.exception import *
from keystone.tests.unit import default_fixtures
-from keystone.contrib.moon.core import LogManager, TenantManager, ADMIN_ID
+from keystone.contrib.moon.core import LogManager, TenantManager
from keystone.tests.moon.unit import *
CONF = cfg.CONF
@@ -38,15 +38,16 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
def setUp(self):
self.useFixture(database.Database())
super(TestIntraExtensionAuthzManagerAuthzOK, self).setUp()
- self.load_backends()
self.load_fixtures(default_fixtures)
+ self.load_backends()
+ domain = {'id': "default", 'name': "default"}
+ self.resource_api.create_domain(domain['id'], domain)
self.admin = create_user(self, username="admin")
self.demo = create_user(self, username="demo")
- self.root_intra_extension = create_intra_extension(self, policy_model="policy_root")
- # force re-initialization of the ADMIN_ID variable
- from keystone.contrib.moon.core import ADMIN_ID
- self.ADMIN_ID = ADMIN_ID
- self.manager = self.authz_api
+ self.root_intra_extension = self.root_api.get_root_extension_dict()
+ self.root_intra_extension_id = self.root_intra_extension.keys()[0]
+ self.ADMIN_ID = self.root_api.get_root_admin_id()
+ self.authz_manager = self.authz_api
self.admin_manager = self.admin_api
def __get_key_from_value(self, value, values_dict):
@@ -72,7 +73,7 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
policy_directory=self.policy_directory)
def delete_admin_intra_extension(self):
- self.manager.del_intra_extension(self.ref["id"])
+ self.authz_manager.del_intra_extension(self.ref["id"])
def test_subjects(self):
authz_ie_dict = create_intra_extension(self, "policy_authz")
@@ -80,12 +81,10 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- subjects = self.manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ subjects = self.authz_manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(subjects, dict)
for key, value in subjects.iteritems():
self.assertIsInstance(value, dict)
@@ -110,7 +109,7 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
# Delete the new subject
self.admin_manager.del_subject(admin_subject_id, authz_ie_dict["id"], new_subject["id"])
- subjects = self.manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
+ subjects = self.authz_manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in subjects.iteritems():
self.assertIsInstance(value, dict)
self.assertIn("name", value)
@@ -123,12 +122,10 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- objects = self.manager.get_objects_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ objects = self.authz_manager.get_objects_dict(admin_subject_id, authz_ie_dict["id"])
objects_id_list = []
self.assertIsInstance(objects, dict)
for key, value in objects.iteritems():
@@ -143,12 +140,10 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- actions = self.manager.get_actions_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ actions = self.authz_manager.get_actions_dict(admin_subject_id, authz_ie_dict["id"])
actions_id_list = []
self.assertIsInstance(actions, dict)
for key, value in actions.iteritems():
@@ -163,12 +158,10 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- subject_categories = self.manager.get_subject_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ subject_categories = self.authz_manager.get_subject_categories_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(subject_categories, dict)
for key, value in subject_categories.iteritems():
self.assertIsInstance(value, dict)
@@ -190,7 +183,7 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
# Delete the new subject_category
self.admin_manager.del_subject_category(admin_subject_id, authz_ie_dict["id"], new_subject_category["id"])
- subject_categories = self.manager.get_subject_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ subject_categories = self.authz_manager.get_subject_categories_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in subject_categories.iteritems():
self.assertIsInstance(value, dict)
self.assertIn("name", value)
@@ -203,12 +196,10 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- object_categories = self.manager.get_object_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ object_categories = self.authz_manager.get_object_categories_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(object_categories, dict)
for key, value in object_categories.iteritems():
self.assertIsInstance(value, dict)
@@ -231,7 +222,7 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
# Delete the new object_category
self.admin_manager.del_object_category(admin_subject_id, authz_ie_dict["id"], new_object_category["id"])
- object_categories = self.manager.get_object_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ object_categories = self.authz_manager.get_object_categories_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in object_categories.iteritems():
self.assertIsInstance(value, dict)
self.assertIn("name", value)
@@ -244,12 +235,10 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- action_categories = self.manager.get_action_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ action_categories = self.authz_manager.get_action_categories_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(action_categories, dict)
for key, value in action_categories.iteritems():
self.assertIsInstance(value, dict)
@@ -272,7 +261,7 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
# Delete the new action_category
self.admin_manager.del_action_category(admin_subject_id, authz_ie_dict["id"], new_action_category["id"])
- action_categories = self.manager.get_action_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ action_categories = self.authz_manager.get_action_categories_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in action_categories.iteritems():
self.assertIsInstance(value, dict)
self.assertIn("name", value)
@@ -285,11 +274,9 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
subject_categories = self.admin_manager.add_subject_category_dict(
admin_subject_id,
@@ -302,7 +289,7 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
for subject_category_id in subject_categories:
- subject_category_scope = self.manager.get_subject_scopes_dict(
+ subject_category_scope = self.authz_manager.get_subject_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
subject_category_id)
@@ -348,11 +335,9 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
object_categories = self.admin_manager.add_object_category_dict(
admin_subject_id,
@@ -365,7 +350,7 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
for object_category_id in object_categories:
- object_category_scope = self.manager.get_object_scopes_dict(
+ object_category_scope = self.authz_manager.get_object_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
object_category_id)
@@ -411,11 +396,9 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
action_categories = self.admin_manager.add_action_category_dict(
admin_subject_id,
@@ -428,7 +411,7 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
for action_category_id in action_categories:
- action_category_scope = self.manager.get_action_scopes_dict(
+ action_category_scope = self.authz_manager.get_action_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
action_category_id)
@@ -474,17 +457,15 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
admin_authz_subject_id, admin_authz_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], authz_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], authz_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
demo_authz_subject_id, demo_authz_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], authz_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], authz_ie_dict['id'], 'demo').iteritems().next()
- subjects_dict = self.manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
+ subjects_dict = self.authz_manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
subject_categories = self.admin_manager.add_subject_category_dict(
admin_subject_id,
@@ -496,7 +477,7 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
)
for subject_category_id in subject_categories:
- subject_category_scope = self.manager.get_subject_scopes_dict(
+ subject_category_scope = self.authz_manager.get_subject_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
subject_category_id)
@@ -527,7 +508,7 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
new_subject_category_scope_2)
subject_category_scope_2_id = subject_category_scope_2.keys()[0]
- subject_category_assignments = self.manager.get_subject_assignment_list(
+ subject_category_assignments = self.authz_manager.get_subject_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
admin_authz_subject_id,
@@ -536,7 +517,7 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
self.assertIsInstance(subject_category_assignments, list)
self.assertEqual([], subject_category_assignments)
- subject_category_assignments = self.manager.get_subject_assignment_list(
+ subject_category_assignments = self.authz_manager.get_subject_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
demo_authz_subject_id,
@@ -597,13 +578,11 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- objects_dict = self.manager.get_objects_dict(admin_subject_id, authz_ie_dict["id"])
+ objects_dict = self.authz_manager.get_objects_dict(admin_subject_id, authz_ie_dict["id"])
object_vm1_id = None
object_vm2_id = None
@@ -625,7 +604,7 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
)
for object_category_id in object_categories:
- object_category_scope = self.manager.get_object_scopes_dict(
+ object_category_scope = self.authz_manager.get_object_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
object_category_id)
@@ -656,7 +635,7 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
new_object_category_scope_2)
object_category_scope_2_id = object_category_scope_2.keys()[0]
- object_category_assignments = self.manager.get_object_assignment_list(
+ object_category_assignments = self.authz_manager.get_object_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
object_vm1_id,
@@ -665,7 +644,7 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
self.assertIsInstance(object_category_assignments, list)
self.assertEqual([], object_category_assignments)
- object_category_assignments = self.manager.get_object_assignment_list(
+ object_category_assignments = self.authz_manager.get_object_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
object_vm2_id,
@@ -726,13 +705,11 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- actions_dict = self.manager.get_actions_dict(admin_subject_id, authz_ie_dict["id"])
+ actions_dict = self.authz_manager.get_actions_dict(admin_subject_id, authz_ie_dict["id"])
action_upload_id = None
action_list_id = None
@@ -754,7 +731,7 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
)
for action_category_id in action_categories:
- action_category_scope = self.manager.get_action_scopes_dict(
+ action_category_scope = self.authz_manager.get_action_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
action_category_id)
@@ -785,7 +762,7 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
new_action_category_scope_2)
action_category_scope_2_id = action_category_scope_2.keys()[0]
- action_category_assignments = self.manager.get_action_assignment_list(
+ action_category_assignments = self.authz_manager.get_action_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
action_upload_id,
@@ -794,7 +771,7 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
self.assertIsInstance(action_category_assignments, list)
self.assertEqual([], action_category_assignments)
- action_category_assignments = self.manager.get_action_assignment_list(
+ action_category_assignments = self.authz_manager.get_action_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
action_list_id,
@@ -855,11 +832,9 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.admin_manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
aggregation_algorithms = self.admin_manager.get_aggregation_algorithm_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in aggregation_algorithms.iteritems():
@@ -897,11 +872,9 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.admin_manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
sub_meta_rules = self.admin_manager.get_sub_meta_rules_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(sub_meta_rules, dict)
@@ -969,23 +942,28 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
# TODO: add test for the delete function
-@dependency.requires('admin_api', 'authz_api', 'tenant_api', 'configuration_api', 'moonlog_api')
+@dependency.requires('admin_api', 'authz_api', 'tenant_api', 'configuration_api', 'moonlog_api', 'identity_api', 'root_api')
class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
def setUp(self):
self.useFixture(database.Database())
super(TestIntraExtensionAuthzManagerAuthzKO, self).setUp()
- self.load_backends()
self.load_fixtures(default_fixtures)
+ self.load_backends()
+ domain = {'id': "default", 'name': "default"}
+ self.resource_api.create_domain(domain['id'], domain)
self.admin = create_user(self, username="admin")
self.demo = create_user(self, username="demo")
- self.root_intra_extension = create_intra_extension(self, policy_model="policy_root")
- # force re-initialization of the ADMIN_ID variable
- from keystone.contrib.moon.core import ADMIN_ID
- self.ADMIN_ID = ADMIN_ID
- self.manager = self.authz_api
+ self.root_intra_extension = self.root_api.get_root_extension_dict()
+ self.root_intra_extension_id = self.root_intra_extension.keys()[0]
+ self.ADMIN_ID = self.root_api.get_root_admin_id()
+ self.authz_manager = self.authz_api
self.admin_manager = self.admin_api
+ def tearDown(self):
+ # self.admin_manager.del_intra_extension(self.ADMIN_ID, self.root_intra_extension["id"])
+ tests.TestCase.tearDown(self)
+
def __get_key_from_value(self, value, values_dict):
return filter(lambda v: v[1] == value, values_dict.iteritems())[0][0]
@@ -995,70 +973,41 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
"tenant_api": TenantManager(),
"admin_api": IntraExtensionAdminManager(),
"authz_api": IntraExtensionAuthzManager(),
+ "root_api": IntraExtensionRootManager(),
# "resource_api": resource.Manager(),
}
def config_overrides(self):
super(TestIntraExtensionAuthzManagerAuthzKO, self).config_overrides()
self.policy_directory = 'examples/moon/policies'
+ self.root_policy_directory = 'policy_root'
self.config_fixture.config(
group='moon',
intraextension_driver='keystone.contrib.moon.backends.sql.IntraExtensionConnector')
self.config_fixture.config(
group='moon',
policy_directory=self.policy_directory)
-
- def test_tenant_exceptions(self):
- self.assertRaises(
- TenantUnknown,
- self.manager.get_tenant_dict
- )
- self.assertRaises(
- TenantUnknown,
- self.manager.get_tenant_name,
- uuid.uuid4().hex
- )
- self.assertRaises(
- TenantUnknown,
- self.manager.set_tenant_name,
- uuid.uuid4().hex, uuid.uuid4().hex
- )
- self.assertRaises(
- TenantUnknown,
- self.manager.get_extension_uuid,
- uuid.uuid4().hex, "authz"
- )
- self.assertRaises(
- TenantUnknown,
- self.manager.get_extension_uuid,
- uuid.uuid4().hex, "admin"
- )
-
- def test_intra_extension_exceptions(self):
-
- tenant = self.create_tenant()
- self.assertRaises(
- IntraExtensionUnknown,
- self.manager.get_extension_uuid,
- tenant["id"], "authz"
- )
- self.assertRaises(
- IntraExtensionUnknown,
- self.manager.get_extension_uuid,
- tenant["id"], "admin"
- )
- # TODO
+ self.config_fixture.config(
+ group='moon',
+ root_policy_directory=self.root_policy_directory)
def test_delete_admin_intra_extension(self):
+ authz_ie_dict = create_intra_extension(self, "policy_authz")
+ admin_ie_dict = create_intra_extension(self, "policy_admin")
+ tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
+
+ admin_subject_id, admin_subject_dict = \
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
self.assertRaises(
- AdminException,
- self.manager.del_intra_extension,
- self.ref["id"])
+ SubjectUnknown,
+ self.authz_manager.del_intra_extension,
+ uuid.uuid4().hex,
+ admin_ie_dict["id"])
def test_authz_exceptions(self):
self.assertRaises(
TenantUnknown,
- self.manager.authz,
+ self.authz_manager.authz,
uuid.uuid4().hex, uuid.uuid4().hex, uuid.uuid4().hex, uuid.uuid4().hex
)
@@ -1067,19 +1016,17 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
# Test when subject is unknown
self.assertRaises(
SubjectUnknown,
- self.manager.authz,
+ self.authz_manager.authz,
tenant["name"], uuid.uuid4().hex, uuid.uuid4().hex, uuid.uuid4().hex
)
# Test when subject is known but not the object
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], authz_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], authz_ie_dict['id'], 'demo').iteritems().next()
# self.manager.add_subject_dict(
# admin_subject_id,
@@ -1089,13 +1036,13 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
self.assertRaises(
ObjectUnknown,
- self.manager.authz,
+ self.authz_manager.authz,
tenant["name"], demo_subject_dict["name"], uuid.uuid4().hex, uuid.uuid4().hex
)
# Test when subject and object are known but not the action
my_object = {"name": "my_object", "description": "my_object description"}
- _tmp = self.manager.add_object_dict(
+ _tmp = self.admin_manager.add_object_dict(
admin_subject_id,
authz_ie_dict["id"],
my_object
@@ -1104,13 +1051,13 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
self.assertRaises(
ActionUnknown,
- self.manager.authz,
+ self.authz_manager.authz,
tenant["name"], demo_subject_dict["name"], my_object["name"], uuid.uuid4().hex
)
# Test when subject and object and action are known
my_action = {"name": "my_action", "description": "my_action description"}
- _tmp = self.manager.add_action_dict(
+ _tmp = self.admin_manager.add_action_dict(
admin_subject_id,
authz_ie_dict["id"],
my_action
@@ -1119,13 +1066,13 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
self.assertRaises(
AuthzException,
- self.manager.authz,
+ self.authz_manager.authz,
tenant["name"], demo_subject_dict["name"], my_object["name"], my_action["name"]
)
# Add a subject scope and test ObjectCategoryAssignmentOutOfScope
my_subject_category = {"name": "my_subject_category", "description": "my_subject_category description"}
- _tmp = self.manager.add_subject_category_dict(
+ _tmp = self.admin_manager.add_subject_category_dict(
admin_subject_id,
authz_ie_dict["id"],
my_subject_category
@@ -1133,7 +1080,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
my_subject_category["id"] = _tmp.keys()[0]
my_subject_scope = {"name": "my_subject_scope", "description": "my_subject_scope description"}
- _tmp = self.manager.add_subject_scope_dict(
+ _tmp = self.admin_manager.add_subject_scope_dict(
admin_subject_id,
authz_ie_dict["id"],
my_subject_category["id"],
@@ -1143,13 +1090,13 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
self.assertRaises(
AuthzException,
- self.manager.authz,
+ self.authz_manager.authz,
tenant["name"], demo_subject_dict["name"], my_object["name"], my_action["name"]
)
# Add an object scope and test ActionCategoryAssignmentOutOfScope
my_object_category = {"name": "my_object_category", "description": "my_object_category description"}
- _tmp = self.manager.add_object_category_dict(
+ _tmp = self.admin_manager.add_object_category_dict(
admin_subject_id,
authz_ie_dict["id"],
my_object_category
@@ -1157,7 +1104,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
my_object_category["id"] = _tmp.keys()[0]
my_object_scope = {"name": "my_object_scope", "description": "my_object_scope description"}
- _tmp = self.manager.add_object_scope_dict(
+ _tmp = self.admin_manager.add_object_scope_dict(
admin_subject_id,
authz_ie_dict["id"],
my_object_category["id"],
@@ -1167,13 +1114,13 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
self.assertRaises(
AuthzException,
- self.manager.authz,
+ self.authz_manager.authz,
tenant["name"], demo_subject_dict["name"], my_object["name"], my_action["name"]
)
# Add an action scope and test SubjectCategoryAssignmentUnknown
my_action_category = {"name": "my_action_category", "description": "my_action_category description"}
- _tmp = self.manager.add_action_category_dict(
+ _tmp = self.admin_manager.add_action_category_dict(
admin_subject_id,
authz_ie_dict["id"],
my_action_category
@@ -1181,7 +1128,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
my_action_category["id"] = _tmp.keys()[0]
my_action_scope = {"name": "my_action_scope", "description": "my_action_scope description"}
- _tmp = self.manager.add_action_scope_dict(
+ _tmp = self.admin_manager.add_action_scope_dict(
admin_subject_id,
authz_ie_dict["id"],
my_action_category["id"],
@@ -1191,12 +1138,12 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
self.assertRaises(
AuthzException,
- self.manager.authz,
+ self.authz_manager.authz,
tenant["name"], demo_subject_dict["name"], my_object["name"], my_action["name"]
)
# Add a subject assignment and test ObjectCategoryAssignmentUnknown
- self.manager.add_subject_assignment_list(
+ self.admin_manager.add_subject_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
demo_subject_id,
@@ -1206,12 +1153,12 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
self.assertRaises(
AuthzException,
- self.manager.authz,
+ self.authz_manager.authz,
tenant["name"], demo_subject_dict["name"], my_object["name"], my_action["name"]
)
# Add an object assignment and test ActionCategoryAssignmentUnknown
- self.manager.add_object_assignment_list(
+ self.admin_manager.add_object_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
my_object["id"],
@@ -1221,12 +1168,12 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
self.assertRaises(
AuthzException,
- self.manager.authz,
+ self.authz_manager.authz,
tenant["name"], demo_subject_dict["name"], my_object["name"], my_action["name"]
)
# Add an action assignment and test RuleUnknown
- self.manager.add_action_assignment_list(
+ self.admin_manager.add_action_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
my_action["id"],
@@ -1236,7 +1183,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
self.assertRaises(
AuthzException,
- self.manager.authz,
+ self.authz_manager.authz,
tenant["name"], admin_subject_dict["name"], my_object["name"], my_action["name"]
)
@@ -1248,15 +1195,15 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
"action_categories": [my_action_category["id"], ],
"object_categories": [my_object_category["id"], ]
}
- print("my_meta_rule", my_meta_rule)
- sub_meta_rules_dict = self.manager.get_sub_meta_rules_dict(
+ sub_meta_rules_dict = self.authz_manager.get_sub_meta_rules_dict(
admin_subject_id,
authz_ie_dict["id"]
)
+ print("authz_ie_dict[\"id\"]", authz_ie_dict["id"])
self.assertRaises(
SubMetaRuleAlgorithmNotExisting,
- self.manager.add_sub_meta_rule_dict,
+ self.admin_manager.add_sub_meta_rule_dict,
admin_subject_id,
authz_ie_dict["id"],
my_meta_rule
@@ -1264,19 +1211,31 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
# TODO: the next request should be called with demo_subject_id
# but the demo user has no right in the root intra_extension
- algorithms = self.configuration_api.get_sub_meta_rule_algorithms_dict(admin_subject_id)
- for algorithm_id in algorithms:
- if algorithms[algorithm_id]["name"] == "inclusion":
- my_meta_rule["algorithm"] = algorithm_id
+ # algorithms = self.configuration_api.get_sub_meta_rule_algorithms_dict(admin_subject_id)
+ # for algorithm_id in algorithms:
+ # if algorithms[algorithm_id]["name"] == "inclusion":
+ # my_meta_rule["algorithm"] = algorithm_id
+ my_meta_rule['algorithm'] = 'inclusion'
- sub_meta_rule = self.manager.add_sub_meta_rule_dict(
+ sub_meta_rule = self.admin_manager.add_sub_meta_rule_dict(
admin_subject_id,
authz_ie_dict["id"],
my_meta_rule
)
- sub_meta_rule_id, sub_meta_rule_dict = sub_meta_rule.iteritems().next()
-
- rule = self.manager.add_rule_dict(
+ sub_meta_rule_id, sub_meta_rule_dict = None, None
+ for key, value in sub_meta_rule.iteritems():
+ if value["name"] == my_meta_rule["name"]:
+ sub_meta_rule_id, sub_meta_rule_dict = key, value
+ break
+
+ aggregation_algorithms = self.configuration_api.get_aggregation_algorithms_dict(admin_subject_id)
+ for _id in aggregation_algorithms:
+ if aggregation_algorithms[_id]["name"] == "one_true":
+ agg = self.admin_manager.set_aggregation_algorithm_dict(admin_subject_id, authz_ie_dict["id"],
+ _id,
+ aggregation_algorithms[_id])
+
+ rule = self.admin_manager.add_rule_dict(
admin_subject_id,
authz_ie_dict["id"],
sub_meta_rule_id,
@@ -1285,11 +1244,11 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
self.assertRaises(
AuthzException,
- self.manager.authz,
+ self.authz_manager.authz,
tenant["name"], admin_subject_dict["name"], my_object["name"], my_action["name"]
)
- result = self.manager.authz(tenant["name"], demo_subject_dict["name"], my_object["name"], my_action["name"])
+ result = self.authz_manager.authz(tenant["name"], demo_subject_dict["name"], my_object["name"], my_action["name"])
self.assertEqual(True, result)
def test_subjects(self):
@@ -1298,12 +1257,10 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- subjects = self.manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ subjects = self.authz_manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(subjects, dict)
for key, value in subjects.iteritems():
self.assertIsInstance(value, dict)
@@ -1316,7 +1273,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
new_subject = {"name": "subject_test", "description": "subject_test"}
self.assertRaises(
AuthzException,
- self.manager.add_subject_dict,
+ self.admin_manager.add_subject_dict,
demo_subject_id, admin_ie_dict["id"], new_subject)
subjects = self.admin_manager.add_subject_dict(admin_subject_id, authz_ie_dict["id"], new_subject)
@@ -1333,11 +1290,11 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
# Delete the new subject
self.assertRaises(
AuthzException,
- self.manager.del_subject,
+ self.authz_manager.del_subject,
demo_subject_id, authz_ie_dict["id"], new_subject["id"])
self.admin_manager.del_subject(admin_subject_id, authz_ie_dict["id"], new_subject["id"])
- subjects = self.manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
+ subjects = self.authz_manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in subjects.iteritems():
self.assertIsInstance(value, dict)
self.assertIn("name", value)
@@ -1350,12 +1307,10 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- objects = self.manager.get_objects_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ objects = self.authz_manager.get_objects_dict(admin_subject_id, authz_ie_dict["id"])
objects_id_list = []
self.assertIsInstance(objects, dict)
for key, value in objects.iteritems():
@@ -1364,39 +1319,39 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
self.assertIn("name", value)
self.assertIn("description", value)
- create_user(self, "subject_test")
+ # create_user(self, "subject_test")
new_object = {"name": "object_test", "description": "object_test"}
self.assertRaises(
AuthzException,
- self.manager.add_object_dict,
+ self.authz_manager.add_object_dict,
demo_subject_id, admin_ie_dict["id"], new_object)
self.assertRaises(
ObjectsWriteNoAuthorized,
self.admin_manager.add_object_dict,
- admin_subject_id, authz_ie_dict["id"], new_object
+ admin_subject_id, admin_ie_dict["id"], new_object
)
# Delete the new object
for key in objects_id_list:
self.assertRaises(
AuthzException,
- self.manager.del_object,
+ self.authz_manager.del_object,
demo_subject_id, authz_ie_dict["id"], key)
self.assertRaises(
AuthzException,
- self.manager.del_object,
+ self.authz_manager.del_object,
admin_subject_id, authz_ie_dict["id"], key)
for key in objects_id_list:
self.assertRaises(
ObjectsWriteNoAuthorized,
self.admin_manager.del_object,
- demo_subject_id, authz_ie_dict["id"], key)
+ demo_subject_id, admin_ie_dict["id"], key)
self.assertRaises(
ObjectsWriteNoAuthorized,
self.admin_manager.del_object,
- admin_subject_id, authz_ie_dict["id"], key)
+ admin_subject_id, admin_ie_dict["id"], key)
def test_actions(self):
authz_ie_dict = create_intra_extension(self, "policy_authz")
@@ -1404,12 +1359,10 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- actions = self.manager.get_actions_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ actions = self.authz_manager.get_actions_dict(admin_subject_id, authz_ie_dict["id"])
actions_id_list = []
self.assertIsInstance(actions, dict)
for key, value in actions.iteritems():
@@ -1422,35 +1375,35 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
new_action = {"name": "action_test", "description": "action_test"}
self.assertRaises(
AuthzException,
- self.manager.add_action_dict,
+ self.authz_manager.add_action_dict,
demo_subject_id, admin_ie_dict["id"], new_action)
self.assertRaises(
ActionsWriteNoAuthorized,
self.admin_manager.add_action_dict,
- admin_subject_id, authz_ie_dict["id"], new_action
+ admin_subject_id, admin_ie_dict["id"], new_action
)
# Delete all actions
for key in actions_id_list:
self.assertRaises(
AuthzException,
- self.manager.del_action,
+ self.authz_manager.del_action,
demo_subject_id, authz_ie_dict["id"], key)
self.assertRaises(
AuthzException,
- self.manager.del_action,
+ self.authz_manager.del_action,
admin_subject_id, authz_ie_dict["id"], key)
for key in actions_id_list:
self.assertRaises(
ActionsWriteNoAuthorized,
self.admin_manager.del_action,
- demo_subject_id, authz_ie_dict["id"], key)
+ demo_subject_id, admin_ie_dict["id"], key)
self.assertRaises(
ActionsWriteNoAuthorized,
self.admin_manager.del_action,
- admin_subject_id, authz_ie_dict["id"], key)
+ admin_subject_id, admin_ie_dict["id"], key)
def test_subject_categories(self):
authz_ie_dict = create_intra_extension(self, "policy_authz")
@@ -1458,12 +1411,10 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- subject_categories = self.manager.get_subject_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ subject_categories = self.authz_manager.get_subject_categories_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(subject_categories, dict)
for key, value in subject_categories.iteritems():
self.assertIsInstance(value, dict)
@@ -1473,7 +1424,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
new_subject_category = {"name": "subject_category_test", "description": "subject_category_test"}
self.assertRaises(
AuthzException,
- self.manager.add_subject_category_dict,
+ self.authz_manager.add_subject_category_dict,
demo_subject_id, admin_ie_dict["id"], new_subject_category)
subject_categories = self.admin_manager.add_subject_category_dict(admin_subject_id, authz_ie_dict["id"], new_subject_category)
@@ -1490,11 +1441,11 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
# Delete the new subject_category
self.assertRaises(
AuthzException,
- self.manager.del_subject_category,
+ self.authz_manager.del_subject_category,
demo_subject_id, authz_ie_dict["id"], new_subject_category["id"])
self.admin_manager.del_subject_category(admin_subject_id, authz_ie_dict["id"], new_subject_category["id"])
- subject_categories = self.manager.get_subject_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ subject_categories = self.authz_manager.get_subject_categories_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in subject_categories.iteritems():
self.assertIsInstance(value, dict)
self.assertIn("name", value)
@@ -1507,12 +1458,10 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- object_categories = self.manager.get_object_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ object_categories = self.authz_manager.get_object_categories_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(object_categories, dict)
for key, value in object_categories.iteritems():
self.assertIsInstance(value, dict)
@@ -1522,7 +1471,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
new_object_category = {"name": "object_category_test", "description": "object_category_test"}
self.assertRaises(
AuthzException,
- self.manager.add_object_category_dict,
+ self.authz_manager.add_object_category_dict,
demo_subject_id, admin_ie_dict["id"], new_object_category)
object_categories = self.admin_manager.add_object_category_dict(admin_subject_id, authz_ie_dict["id"], new_object_category)
@@ -1539,11 +1488,11 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
# Delete the new object_category
self.assertRaises(
AuthzException,
- self.manager.del_object_category,
+ self.authz_manager.del_object_category,
demo_subject_id, authz_ie_dict["id"], new_object_category["id"])
self.admin_manager.del_object_category(admin_subject_id, authz_ie_dict["id"], new_object_category["id"])
- object_categories = self.manager.get_object_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ object_categories = self.authz_manager.get_object_categories_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in object_categories.iteritems():
self.assertIsInstance(value, dict)
self.assertIn("name", value)
@@ -1556,12 +1505,10 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- action_categories = self.manager.get_action_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ action_categories = self.authz_manager.get_action_categories_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(action_categories, dict)
for key, value in action_categories.iteritems():
self.assertIsInstance(value, dict)
@@ -1571,7 +1518,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
new_action_category = {"name": "action_category_test", "description": "action_category_test"}
self.assertRaises(
AuthzException,
- self.manager.add_action_category_dict,
+ self.authz_manager.add_action_category_dict,
demo_subject_id, admin_ie_dict["id"], new_action_category)
action_categories = self.admin_manager.add_action_category_dict(admin_subject_id, authz_ie_dict["id"], new_action_category)
@@ -1588,11 +1535,11 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
# Delete the new action_category
self.assertRaises(
AuthzException,
- self.manager.del_action_category,
+ self.authz_manager.del_action_category,
demo_subject_id, authz_ie_dict["id"], new_action_category["id"])
self.admin_manager.del_action_category(admin_subject_id, authz_ie_dict["id"], new_action_category["id"])
- action_categories = self.manager.get_action_categories_dict(admin_subject_id, authz_ie_dict["id"])
+ action_categories = self.authz_manager.get_action_categories_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in action_categories.iteritems():
self.assertIsInstance(value, dict)
self.assertIn("name", value)
@@ -1605,11 +1552,9 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
subject_categories = self.admin_manager.add_subject_category_dict(
admin_subject_id,
@@ -1622,7 +1567,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
for subject_category_id in subject_categories:
- subject_category_scope = self.manager.get_subject_scopes_dict(
+ subject_category_scope = self.authz_manager.get_subject_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
subject_category_id)
@@ -1677,11 +1622,9 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
object_categories = self.admin_manager.add_object_category_dict(
admin_subject_id,
@@ -1694,7 +1637,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
for object_category_id in object_categories:
- object_category_scope = self.manager.get_object_scopes_dict(
+ object_category_scope = self.authz_manager.get_object_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
object_category_id)
@@ -1749,11 +1692,9 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
action_categories = self.admin_manager.add_action_category_dict(
admin_subject_id,
@@ -1766,7 +1707,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
for action_category_id in action_categories:
- action_category_scope = self.manager.get_action_scopes_dict(
+ action_category_scope = self.authz_manager.get_action_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
action_category_id)
@@ -1821,17 +1762,15 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
admin_authz_subject_id, admin_authz_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], authz_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], authz_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
demo_authz_subject_id, demo_authz_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], authz_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], authz_ie_dict['id'], 'demo').iteritems().next()
- subjects_dict = self.manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
+ subjects_dict = self.authz_manager.get_subjects_dict(admin_subject_id, authz_ie_dict["id"])
subject_categories = self.admin_manager.add_subject_category_dict(
admin_subject_id,
@@ -1843,7 +1782,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
)
for subject_category_id in subject_categories:
- subject_category_scope = self.manager.get_subject_scopes_dict(
+ subject_category_scope = self.authz_manager.get_subject_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
subject_category_id)
@@ -1874,7 +1813,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
new_subject_category_scope_2)
subject_category_scope_2_id = subject_category_scope_2.keys()[0]
- subject_category_assignments = self.manager.get_subject_assignment_list(
+ subject_category_assignments = self.authz_manager.get_subject_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
admin_authz_subject_id,
@@ -1883,7 +1822,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
self.assertIsInstance(subject_category_assignments, list)
self.assertEqual([], subject_category_assignments)
- subject_category_assignments = self.manager.get_subject_assignment_list(
+ subject_category_assignments = self.authz_manager.get_subject_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
demo_authz_subject_id,
@@ -1894,14 +1833,14 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
self.assertRaises(
AuthzException,
- self.manager.add_subject_assignment_list,
+ self.authz_manager.add_subject_assignment_list,
demo_subject_id, authz_ie_dict["id"],
admin_authz_subject_id, subject_category_id, subject_category_scope_1_id
)
self.assertRaises(
AuthzException,
- self.manager.add_subject_assignment_list,
+ self.authz_manager.add_subject_assignment_list,
demo_subject_id, authz_ie_dict["id"],
demo_authz_subject_id, subject_category_id, subject_category_scope_2_id
)
@@ -1973,13 +1912,11 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- objects_dict = self.manager.get_objects_dict(admin_subject_id, authz_ie_dict["id"])
+ objects_dict = self.authz_manager.get_objects_dict(admin_subject_id, authz_ie_dict["id"])
object_vm1_id = None
object_vm2_id = None
@@ -2001,7 +1938,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
)
for object_category_id in object_categories:
- object_category_scope = self.manager.get_object_scopes_dict(
+ object_category_scope = self.authz_manager.get_object_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
object_category_id)
@@ -2032,7 +1969,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
new_object_category_scope_2)
object_category_scope_2_id = object_category_scope_2.keys()[0]
- object_category_assignments = self.manager.get_object_assignment_list(
+ object_category_assignments = self.authz_manager.get_object_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
object_vm1_id,
@@ -2041,7 +1978,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
self.assertIsInstance(object_category_assignments, list)
self.assertEqual([], object_category_assignments)
- object_category_assignments = self.manager.get_object_assignment_list(
+ object_category_assignments = self.authz_manager.get_object_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
object_vm2_id,
@@ -2052,14 +1989,14 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
self.assertRaises(
AuthzException,
- self.manager.add_object_assignment_list,
+ self.authz_manager.add_object_assignment_list,
demo_subject_id, authz_ie_dict["id"],
object_vm1_id, object_category_id, object_category_scope_1_id
)
self.assertRaises(
AuthzException,
- self.manager.add_object_assignment_list,
+ self.authz_manager.add_object_assignment_list,
demo_subject_id, authz_ie_dict["id"],
object_vm2_id, object_category_id, object_category_scope_2_id
)
@@ -2131,13 +2068,11 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
- actions_dict = self.manager.get_actions_dict(admin_subject_id, authz_ie_dict["id"])
+ actions_dict = self.authz_manager.get_actions_dict(admin_subject_id, authz_ie_dict["id"])
action_upload_id = None
action_list_id = None
@@ -2159,7 +2094,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
)
for action_category_id in action_categories:
- action_category_scope = self.manager.get_action_scopes_dict(
+ action_category_scope = self.authz_manager.get_action_scopes_dict(
admin_subject_id,
authz_ie_dict["id"],
action_category_id)
@@ -2190,7 +2125,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
new_action_category_scope_2)
action_category_scope_2_id = action_category_scope_2.keys()[0]
- action_category_assignments = self.manager.get_action_assignment_list(
+ action_category_assignments = self.authz_manager.get_action_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
action_upload_id,
@@ -2199,7 +2134,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
self.assertIsInstance(action_category_assignments, list)
self.assertEqual([], action_category_assignments)
- action_category_assignments = self.manager.get_action_assignment_list(
+ action_category_assignments = self.authz_manager.get_action_assignment_list(
admin_subject_id,
authz_ie_dict["id"],
action_list_id,
@@ -2210,14 +2145,14 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
self.assertRaises(
AuthzException,
- self.manager.add_action_assignment_list,
+ self.authz_manager.add_action_assignment_list,
demo_subject_id, authz_ie_dict["id"],
action_upload_id, action_category_id, action_category_scope_1_id
)
self.assertRaises(
AuthzException,
- self.manager.add_action_assignment_list,
+ self.authz_manager.add_action_assignment_list,
demo_subject_id, authz_ie_dict["id"],
action_list_id, action_category_id, action_category_scope_2_id
)
@@ -2289,11 +2224,9 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.admin_manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
aggregation_algorithms = self.admin_manager.get_aggregation_algorithm_dict(admin_subject_id, authz_ie_dict["id"])
for key, value in aggregation_algorithms.iteritems():
@@ -2331,11 +2264,9 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
tenant, mapping = create_mapping(self, "demo", authz_ie_dict['id'], admin_ie_dict['id'])
admin_subject_id, admin_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
- demo_subject_dict = self.admin_manager.add_subject_dict(admin_subject_id, admin_ie_dict["id"],
- {"name": "demo", "description": "demo"})
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'admin').iteritems().next()
demo_subject_id, demo_subject_dict = \
- self.tenant_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
+ self.admin_api.get_subject_dict_from_keystone_name(tenant['id'], admin_ie_dict['id'], 'demo').iteritems().next()
sub_meta_rules = self.admin_manager.get_sub_meta_rules_dict(admin_subject_id, authz_ie_dict["id"])
self.assertIsInstance(sub_meta_rules, dict)
diff --git a/keystone-moon/keystone/tests/moon/unit/test_unit_core_log.py b/keystone-moon/keystone/tests/moon/unit/test_unit_core_log.py
index 17e70018..37d210aa 100644
--- a/keystone-moon/keystone/tests/moon/unit/test_unit_core_log.py
+++ b/keystone-moon/keystone/tests/moon/unit/test_unit_core_log.py
@@ -17,7 +17,7 @@ from keystone import resource
from keystone.contrib.moon.exception import *
from keystone.tests.unit import default_fixtures
from keystone.contrib.moon.core import LogManager, TenantManager
-from keystone.contrib.moon.core import ADMIN_ID
+from keystone.tests.moon.unit import *
CONF = cfg.CONF
@@ -41,15 +41,18 @@ class TestIntraExtensionAdminManager(tests.TestCase):
def setUp(self):
self.useFixture(database.Database())
super(TestIntraExtensionAdminManager, self).setUp()
- self.load_backends()
self.load_fixtures(default_fixtures)
- self.admin = self.create_user(username="admin")
- self.demo = self.create_user(username="demo")
- self.root_intra_extension = self.create_intra_extension(policy_model="policy_root")
- # force re-initialization of the ADMIN_ID variable
- from keystone.contrib.moon.core import ADMIN_ID
- self.ADMIN_ID = ADMIN_ID
- self.manager = IntraExtensionAdminManager()
+ self.load_backends()
+ domain = {'id': "default", 'name': "default"}
+ self.resource_api.create_domain(domain['id'], domain)
+ self.admin = create_user(self, username="admin")
+ self.demo = create_user(self, username="demo")
+ self.root_intra_extension = self.root_api.get_root_extension_dict()
+ self.root_intra_extension_id = self.root_intra_extension.keys()[0]
+ self.ADMIN_ID = self.root_api.get_root_admin_id()
+ self.authz_manager = self.authz_api
+ self.admin_manager = self.admin_api
+ self.tenant_manager = self.tenant_api
def __get_key_from_value(self, value, values_dict):
return filter(lambda v: v[1] == value, values_dict.iteritems())[0][0]
@@ -71,43 +74,6 @@ class TestIntraExtensionAdminManager(tests.TestCase):
group='moon',
policy_directory=self.policy_directory)
- def create_intra_extension(self, policy_model="policy_rbac_admin"):
- # Create the admin user because IntraExtension needs it
- self.admin = self.identity_api.create_user(USER_ADMIN)
- IE["policymodel"] = policy_model
- self.ref = self.manager.load_intra_extension_dict(ADMIN_ID, intra_extension_dict=IE)
- self.assertIsInstance(self.ref, dict)
- self.create_tenant(self.ref["id"])
-
- def create_tenant(self, authz_uuid):
- tenant = {
- "id": uuid.uuid4().hex,
- "name": "TestAuthzIntraExtensionManager",
- "enabled": True,
- "description": "",
- "domain_id": "default"
- }
- project = self.resource_api.create_project(tenant["id"], tenant)
- mapping = self.tenant_api.set_tenant_dict(project["id"], project["name"], authz_uuid, None)
- self.assertIsInstance(mapping, dict)
- self.assertIn("authz", mapping)
- self.assertEqual(mapping["authz"], authz_uuid)
- return mapping
-
- def create_user(self, username="TestAdminIntraExtensionManagerUser"):
- user = {
- "id": uuid.uuid4().hex,
- "name": username,
- "enabled": True,
- "description": "",
- "domain_id": "default"
- }
- _user = self.identity_api.create_user(user)
- return _user
-
- def delete_admin_intra_extension(self):
- self.manager.del_intra_extension(self.ref["id"])
-
def send_logs(self):
log_authz = "Test for authz " + uuid.uuid4().hex
logs = []
diff --git a/keystone-moon/keystone/tests/moon/unit/test_unit_core_tenant.py b/keystone-moon/keystone/tests/moon/unit/test_unit_core_tenant.py
index 995b6a54..3c136ccd 100644
--- a/keystone-moon/keystone/tests/moon/unit/test_unit_core_tenant.py
+++ b/keystone-moon/keystone/tests/moon/unit/test_unit_core_tenant.py
@@ -14,8 +14,8 @@ from keystone.contrib.moon.exception import *
from keystone.tests.unit import default_fixtures
from keystone.contrib.moon.core import LogManager
from keystone.contrib.moon.core import ConfigurationManager
-from keystone.contrib.moon.core import ADMIN_ID
from keystone.common import dependency
+from keystone.tests.moon.unit import *
CONF = cfg.CONF
@@ -37,17 +37,18 @@ class TestTenantManager(tests.TestCase):
def setUp(self):
self.useFixture(database.Database())
super(TestTenantManager, self).setUp()
- self.load_backends()
self.load_fixtures(default_fixtures)
- self.admin = self.create_user(username="admin")
- self.demo = self.create_user(username="demo")
- self.root_intra_extension = self.create_intra_extension(policy_model="policy_root")
- # force re-initialization of the ADMIN_ID variable
- from keystone.contrib.moon.core import ADMIN_ID
- self.ADMIN_ID = ADMIN_ID
- self.manager = self.tenant_api
- # self.configuration_api = self.configuration_api
- # self.configuration_api.init_default_variables()
+ self.load_backends()
+ domain = {'id': "default", 'name': "default"}
+ self.resource_api.create_domain(domain['id'], domain)
+ self.admin = create_user(self, username="admin")
+ self.demo = create_user(self, username="demo")
+ self.root_intra_extension = self.root_api.get_root_extension_dict()
+ self.root_intra_extension_id = self.root_intra_extension.keys()[0]
+ self.ADMIN_ID = self.root_api.get_root_admin_id()
+ self.authz_manager = self.authz_api
+ self.admin_manager = self.admin_api
+ self.tenant_manager = self.tenant_api
def load_extra_backends(self):
return {
@@ -67,30 +68,9 @@ class TestTenantManager(tests.TestCase):
group='moon',
policy_directory=self.policy_directory)
- def create_user(self, username="admin"):
-
- _USER = dict(USER)
- _USER["name"] = username
- return self.identity_api.create_user(_USER)
-
- def create_intra_extension(self, policy_model="policy_authz"):
-
- IE["model"] = policy_model
- IE["name"] = uuid.uuid4().hex
- genre = "admin"
- if "authz" in policy_model:
- genre = "authz"
- IE["genre"] = genre
- # force re-initialization of the ADMIN_ID variable
- from keystone.contrib.moon.core import ADMIN_ID
- self.ADMIN_ID = ADMIN_ID
- ref = self.admin_api.load_intra_extension_dict(self.ADMIN_ID, intra_extension_dict=IE)
- self.assertIsInstance(ref, dict)
- return ref
-
def test_add_tenant(self):
- authz_intra_extension = self.create_intra_extension(policy_model="policy_authz")
- admin_intra_extension = self.create_intra_extension(policy_model="policy_admin")
+ authz_intra_extension = create_intra_extension(self, policy_model="policy_authz")
+ admin_intra_extension = create_intra_extension(self, policy_model="policy_admin")
new_tenant = {
"id": uuid.uuid4().hex,
"name": "demo",
@@ -98,129 +78,128 @@ class TestTenantManager(tests.TestCase):
"intra_authz_extension_id": authz_intra_extension['id'],
"intra_admin_extension_id": admin_intra_extension['id'],
}
- data = self.manager.add_tenant_dict(user_id=self.ADMIN_ID, tenant_dict=new_tenant)
- self.assertEquals(new_tenant["id"], data["id"])
- self.assertEquals(new_tenant["name"], data['tenant']["name"])
- self.assertEquals(new_tenant["intra_authz_extension_id"], data['tenant']["intra_authz_extension_id"])
- self.assertEquals(new_tenant["intra_admin_extension_id"], data['tenant']["intra_admin_extension_id"])
- data = self.manager.get_tenants_dict(self.ADMIN_ID)
+ data = self.tenant_manager.add_tenant_dict(user_id=self.ADMIN_ID, tenant_dict=new_tenant)
+ data_id = data.keys()[0]
+ self.assertEquals(new_tenant["id"], data_id)
+ self.assertEquals(new_tenant["name"], data[data_id]["name"])
+ self.assertEquals(new_tenant["intra_authz_extension_id"], data[data_id]["intra_authz_extension_id"])
+ self.assertEquals(new_tenant["intra_admin_extension_id"], data[data_id]["intra_admin_extension_id"])
+ data = self.tenant_manager.get_tenants_dict(self.ADMIN_ID)
self.assertNotEqual(data, {})
data = self.admin_api.get_intra_extension_dict(self.ADMIN_ID, new_tenant["intra_authz_extension_id"])
- self.assertEquals(new_tenant["intra_authz_extension_id"], data["id"])
+ data_id = data["id"]
+ self.assertEquals(new_tenant["intra_authz_extension_id"], data_id)
data = self.admin_api.get_intra_extension_dict(self.ADMIN_ID, new_tenant["intra_admin_extension_id"])
- self.assertEquals(new_tenant["intra_admin_extension_id"], data["id"])
+ data_id = data["id"]
+ self.assertEquals(new_tenant["intra_admin_extension_id"], data_id)
def test_del_tenant(self):
- authz_intra_extension = self.create_intra_extension(policy_model="policy_authz")
- admin_intra_extension = self.create_intra_extension(policy_model="policy_admin")
+ authz_intra_extension = create_intra_extension(self, policy_model="policy_authz")
+ admin_intra_extension = create_intra_extension(self, policy_model="policy_admin")
new_tenant = {
- "id": uuid.uuid4().hex,
"name": "demo",
"description": uuid.uuid4().hex,
"intra_authz_extension_id": authz_intra_extension['id'],
"intra_admin_extension_id": admin_intra_extension['id'],
}
- data = self.manager.add_tenant_dict(user_id=self.ADMIN_ID, tenant_dict=new_tenant)
- self.assertEquals(new_tenant["id"], data["id"])
- self.assertEquals(new_tenant["name"], data['tenant']["name"])
- self.assertEquals(new_tenant["intra_authz_extension_id"], data['tenant']["intra_authz_extension_id"])
- self.assertEquals(new_tenant["intra_admin_extension_id"], data['tenant']["intra_admin_extension_id"])
- data = self.manager.get_tenants_dict(self.ADMIN_ID)
+ data = self.tenant_manager.add_tenant_dict(user_id=self.ADMIN_ID, tenant_dict=new_tenant)
+ data_id = data.keys()[0]
+ self.assertEquals(new_tenant["name"], data[data_id]["name"])
+ self.assertEquals(new_tenant["intra_authz_extension_id"], data[data_id]["intra_authz_extension_id"])
+ self.assertEquals(new_tenant["intra_admin_extension_id"], data[data_id]["intra_admin_extension_id"])
+ data = self.tenant_manager.get_tenants_dict(self.ADMIN_ID)
self.assertNotEqual(data, {})
- self.manager.del_tenant(self.ADMIN_ID, new_tenant["id"])
- data = self.manager.get_tenants_dict(self.ADMIN_ID)
+ self.tenant_manager.del_tenant(self.ADMIN_ID, data_id)
+ data = self.tenant_manager.get_tenants_dict(self.ADMIN_ID)
self.assertEqual(data, {})
def test_set_tenant(self):
- authz_intra_extension = self.create_intra_extension(policy_model="policy_authz")
- admin_intra_extension = self.create_intra_extension(policy_model="policy_admin")
+ authz_intra_extension = create_intra_extension(self, policy_model="policy_authz")
+ admin_intra_extension = create_intra_extension(self, policy_model="policy_admin")
new_tenant = {
- "id": uuid.uuid4().hex,
"name": "demo",
"description": uuid.uuid4().hex,
"intra_authz_extension_id": authz_intra_extension['id'],
"intra_admin_extension_id": admin_intra_extension['id'],
}
- data = self.manager.add_tenant_dict(user_id=self.ADMIN_ID, tenant_dict=new_tenant)
- self.assertEquals(new_tenant["id"], data["id"])
- self.assertEquals(new_tenant["name"], data['tenant']["name"])
- self.assertEquals(new_tenant["intra_authz_extension_id"], data['tenant']["intra_authz_extension_id"])
- self.assertEquals(new_tenant["intra_admin_extension_id"], data['tenant']["intra_admin_extension_id"])
- data = self.manager.get_tenants_dict(self.ADMIN_ID)
+ data = self.tenant_manager.add_tenant_dict(user_id=self.ADMIN_ID, tenant_dict=new_tenant)
+ data_id = data.keys()[0]
+ self.assertEquals(new_tenant["name"], data[data_id]["name"])
+ self.assertEquals(new_tenant["intra_authz_extension_id"], data[data_id]["intra_authz_extension_id"])
+ self.assertEquals(new_tenant["intra_admin_extension_id"], data[data_id]["intra_admin_extension_id"])
+ data = self.tenant_manager.get_tenants_dict(self.ADMIN_ID)
self.assertNotEqual(data, {})
new_tenant["name"] = "demo2"
- data = self.manager.set_tenant_dict(user_id=self.ADMIN_ID, tenant_id=new_tenant["id"], tenant_dict=new_tenant)
- self.assertEquals(new_tenant["id"], data["id"])
- self.assertEquals(new_tenant["name"], data['tenant']["name"])
- self.assertEquals(new_tenant["intra_authz_extension_id"], data['tenant']["intra_authz_extension_id"])
- self.assertEquals(new_tenant["intra_admin_extension_id"], data['tenant']["intra_admin_extension_id"])
+ print(new_tenant)
+ data = self.tenant_manager.set_tenant_dict(user_id=self.ADMIN_ID, tenant_id=data_id, tenant_dict=new_tenant)
+ data_id = data.keys()[0]
+ self.assertEquals(new_tenant["name"], data[data_id]["name"])
+ self.assertEquals(new_tenant["intra_authz_extension_id"], data[data_id]["intra_authz_extension_id"])
+ self.assertEquals(new_tenant["intra_admin_extension_id"], data[data_id]["intra_admin_extension_id"])
def test_exception_tenant_unknown(self):
- self.assertRaises(TenantUnknown, self.manager.get_tenant_dict, self.ADMIN_ID, uuid.uuid4().hex)
- self.assertRaises(TenantUnknown, self.manager.del_tenant, self.ADMIN_ID, uuid.uuid4().hex)
- self.assertRaises(TenantUnknown, self.manager.set_tenant_dict, self.ADMIN_ID, uuid.uuid4().hex, {})
+ self.assertRaises(TenantUnknown, self.tenant_manager.get_tenant_dict, self.ADMIN_ID, uuid.uuid4().hex)
+ self.assertRaises(TenantUnknown, self.tenant_manager.del_tenant, self.ADMIN_ID, uuid.uuid4().hex)
+ self.assertRaises(TenantUnknown, self.tenant_manager.set_tenant_dict, self.ADMIN_ID, uuid.uuid4().hex, {})
- authz_intra_extension = self.create_intra_extension(policy_model="policy_authz")
- admin_intra_extension = self.create_intra_extension(policy_model="policy_admin")
+ authz_intra_extension = create_intra_extension(self, policy_model="policy_authz")
+ admin_intra_extension = create_intra_extension(self, policy_model="policy_admin")
new_tenant = {
- "id": uuid.uuid4().hex,
"name": "demo",
"description": uuid.uuid4().hex,
"intra_authz_extension_id": authz_intra_extension['id'],
"intra_admin_extension_id": admin_intra_extension['id'],
}
- data = self.manager.add_tenant_dict(user_id=self.ADMIN_ID, tenant_dict=new_tenant)
- self.assertEquals(new_tenant["id"], data["id"])
- self.assertEquals(new_tenant["name"], data['tenant']["name"])
- self.assertEquals(new_tenant["intra_authz_extension_id"], data['tenant']["intra_authz_extension_id"])
- self.assertEquals(new_tenant["intra_admin_extension_id"], data['tenant']["intra_admin_extension_id"])
- data = self.manager.get_tenants_dict(self.ADMIN_ID)
+ data = self.tenant_manager.add_tenant_dict(user_id=self.ADMIN_ID, tenant_dict=new_tenant)
+ data_id = data.keys()[0]
+ self.assertEquals(new_tenant["name"], data[data_id]["name"])
+ self.assertEquals(new_tenant["intra_authz_extension_id"], data[data_id]["intra_authz_extension_id"])
+ self.assertEquals(new_tenant["intra_admin_extension_id"], data[data_id]["intra_admin_extension_id"])
+ data = self.tenant_manager.get_tenants_dict(self.ADMIN_ID)
self.assertNotEqual(data, {})
- self.assertRaises(TenantUnknown, self.manager.get_tenant_dict, self.ADMIN_ID, uuid.uuid4().hex)
+ self.assertRaises(TenantUnknown, self.tenant_manager.get_tenant_dict, self.ADMIN_ID, uuid.uuid4().hex)
def test_exception_tenant_added_name_existing(self):
- authz_intra_extension = self.create_intra_extension(policy_model="policy_authz")
- admin_intra_extension = self.create_intra_extension(policy_model="policy_admin")
+ authz_intra_extension = create_intra_extension(self, policy_model="policy_authz")
+ admin_intra_extension = create_intra_extension(self, policy_model="policy_admin")
new_tenant = {
- "id": uuid.uuid4().hex,
"name": "demo",
"description": uuid.uuid4().hex,
"intra_authz_extension_id": authz_intra_extension['id'],
"intra_admin_extension_id": admin_intra_extension['id'],
}
- data = self.manager.add_tenant_dict(user_id=self.ADMIN_ID, tenant_dict=new_tenant)
- self.assertEquals(new_tenant["id"], data["id"])
- self.assertEquals(new_tenant["name"], data['tenant']["name"])
- self.assertEquals(new_tenant["intra_authz_extension_id"], data['tenant']["intra_authz_extension_id"])
- self.assertEquals(new_tenant["intra_admin_extension_id"], data['tenant']["intra_admin_extension_id"])
- data = self.manager.get_tenants_dict(self.ADMIN_ID)
+ data = self.tenant_manager.add_tenant_dict(user_id=self.ADMIN_ID, tenant_dict=new_tenant)
+ data_id = data.keys()[0]
+ self.assertEquals(new_tenant["name"], data[data_id]["name"])
+ self.assertEquals(new_tenant["intra_authz_extension_id"], data[data_id]["intra_authz_extension_id"])
+ self.assertEquals(new_tenant["intra_admin_extension_id"], data[data_id]["intra_admin_extension_id"])
+ data = self.tenant_manager.get_tenants_dict(self.ADMIN_ID)
self.assertNotEqual(data, {})
- self.assertRaises(TenantAddedNameExisting, self.manager.add_tenant_dict, self.ADMIN_ID, new_tenant)
+ self.assertRaises(TenantAddedNameExisting, self.tenant_manager.add_tenant_dict, self.ADMIN_ID, new_tenant)
def test_exception_tenant_no_intra_extension(self):
- authz_intra_extension = self.create_intra_extension(policy_model="policy_authz")
- admin_intra_extension = self.create_intra_extension(policy_model="policy_admin")
+ authz_intra_extension = create_intra_extension(self, policy_model="policy_authz")
+ admin_intra_extension = create_intra_extension(self, policy_model="policy_admin")
new_tenant = {
- "id": uuid.uuid4().hex,
"name": "demo",
"description": uuid.uuid4().hex,
"intra_authz_extension_id": authz_intra_extension['id'],
"intra_admin_extension_id": admin_intra_extension['id'],
}
new_tenant['intra_authz_extension_id'] = None
- self.assertRaises(TenantNoIntraAuthzExtension, self.manager.add_tenant_dict, self.ADMIN_ID, new_tenant)
+ self.assertRaises(TenantNoIntraAuthzExtension, self.tenant_manager.add_tenant_dict, self.ADMIN_ID, new_tenant)
new_tenant['intra_authz_extension_id'] = authz_intra_extension['id']
- data = self.manager.add_tenant_dict(user_id=self.ADMIN_ID, tenant_dict=new_tenant)
- self.assertEquals(new_tenant["id"], data["id"])
- self.assertEquals(new_tenant["name"], data['tenant']["name"])
- self.assertEquals(new_tenant["intra_authz_extension_id"], data['tenant']["intra_authz_extension_id"])
- self.assertEquals(new_tenant["intra_admin_extension_id"], data['tenant']["intra_admin_extension_id"])
- data = self.manager.get_tenants_dict(self.ADMIN_ID)
+ data = self.tenant_manager.add_tenant_dict(user_id=self.ADMIN_ID, tenant_dict=new_tenant)
+ data_id = data.keys()[0]
+ self.assertEquals(new_tenant["name"], data[data_id]["name"])
+ self.assertEquals(new_tenant["intra_authz_extension_id"], data[data_id]["intra_authz_extension_id"])
+ self.assertEquals(new_tenant["intra_admin_extension_id"], data[data_id]["intra_admin_extension_id"])
+ data = self.tenant_manager.get_tenants_dict(self.ADMIN_ID)
self.assertNotEqual(data, {})
new_tenant['intra_authz_extension_id'] = None
new_tenant['name'] = "demo2"
- self.assertRaises(TenantNoIntraAuthzExtension, self.manager.set_tenant_dict, self.ADMIN_ID, new_tenant["id"], new_tenant)
+ self.assertRaises(TenantNoIntraAuthzExtension, self.tenant_manager.set_tenant_dict, self.ADMIN_ID, data_id, new_tenant)
diff --git a/keystone-moon/keystone/tests/unit/__init__.py b/keystone-moon/keystone/tests/unit/__init__.py
index c97ce253..837afe69 100644
--- a/keystone-moon/keystone/tests/unit/__init__.py
+++ b/keystone-moon/keystone/tests/unit/__init__.py
@@ -25,11 +25,9 @@ if six.PY3:
import sys
from unittest import mock # noqa: our import detection is naive?
- sys.modules['eventlet'] = mock.Mock()
- sys.modules['eventlet.green'] = mock.Mock()
- sys.modules['eventlet.wsgi'] = mock.Mock()
- sys.modules['oslo'].messaging = mock.Mock()
- sys.modules['pycadf'] = mock.Mock()
+ sys.modules['ldappool'] = mock.Mock()
+ sys.modules['memcache'] = mock.Mock()
+ sys.modules['oslo_messaging'] = mock.Mock()
sys.modules['paste'] = mock.Mock()
# NOTE(dstanek): oslo_i18n.enable_lazy() must be called before
diff --git a/keystone-moon/keystone/tests/unit/auth/__init__.py b/keystone-moon/keystone/tests/unit/auth/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/auth/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/auth/test_controllers.py b/keystone-moon/keystone/tests/unit/auth/test_controllers.py
new file mode 100644
index 00000000..76f2776a
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/auth/test_controllers.py
@@ -0,0 +1,98 @@
+# Copyright 2015 IBM Corp.
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import mock
+from oslo_config import cfg
+from oslo_config import fixture as config_fixture
+from oslo_utils import importutils
+from oslotest import mockpatch
+import stevedore
+from stevedore import extension
+
+from keystone.auth import controllers
+from keystone.tests import unit
+
+
+class TestLoadAuthMethod(unit.BaseTestCase):
+ def test_entrypoint_works(self):
+ method = uuid.uuid4().hex
+ plugin_name = self.getUniqueString()
+
+ # Register the method using the given plugin
+ cf = self.useFixture(config_fixture.Config())
+ cf.register_opt(cfg.StrOpt(method), group='auth')
+ cf.config(group='auth', **{method: plugin_name})
+
+ # Setup stevedore.DriverManager to return a driver for the plugin
+ extension_ = extension.Extension(
+ plugin_name, entry_point=mock.sentinel.entry_point,
+ plugin=mock.sentinel.plugin,
+ obj=mock.sentinel.driver)
+ auth_plugin_namespace = 'keystone.auth.%s' % method
+ fake_driver_manager = stevedore.DriverManager.make_test_instance(
+ extension_, namespace=auth_plugin_namespace)
+
+ driver_manager_mock = self.useFixture(mockpatch.PatchObject(
+ stevedore, 'DriverManager', return_value=fake_driver_manager)).mock
+
+ driver = controllers.load_auth_method(method)
+
+ self.assertEqual(auth_plugin_namespace, fake_driver_manager.namespace)
+ driver_manager_mock.assert_called_once_with(
+ auth_plugin_namespace, plugin_name, invoke_on_load=True)
+ self.assertIs(driver, mock.sentinel.driver)
+
+ def test_entrypoint_fails_import_works(self):
+ method = uuid.uuid4().hex
+ plugin_name = self.getUniqueString()
+
+ # Register the method using the given plugin
+ cf = self.useFixture(config_fixture.Config())
+ cf.register_opt(cfg.StrOpt(method), group='auth')
+ cf.config(group='auth', **{method: plugin_name})
+
+ # stevedore.DriverManager raises RuntimeError if it can't load the
+ # driver.
+ self.useFixture(mockpatch.PatchObject(
+ stevedore, 'DriverManager', side_effect=RuntimeError))
+
+ self.useFixture(mockpatch.PatchObject(
+ importutils, 'import_object', return_value=mock.sentinel.driver))
+
+ driver = controllers.load_auth_method(method)
+ self.assertIs(driver, mock.sentinel.driver)
+
+ def test_entrypoint_fails_import_fails(self):
+ method = uuid.uuid4().hex
+ plugin_name = self.getUniqueString()
+
+ # Register the method using the given plugin
+ cf = self.useFixture(config_fixture.Config())
+ cf.register_opt(cfg.StrOpt(method), group='auth')
+ cf.config(group='auth', **{method: plugin_name})
+
+ # stevedore.DriverManager raises RuntimeError if it can't load the
+ # driver.
+ self.useFixture(mockpatch.PatchObject(
+ stevedore, 'DriverManager', side_effect=RuntimeError))
+
+ class TestException(Exception):
+ pass
+
+ self.useFixture(mockpatch.PatchObject(
+ importutils, 'import_object', side_effect=TestException))
+
+ self.assertRaises(TestException, controllers.load_auth_method, method)
diff --git a/keystone-moon/keystone/tests/unit/backend/core_ldap.py b/keystone-moon/keystone/tests/unit/backend/core_ldap.py
index 9d6b23e1..a6cd0802 100644
--- a/keystone-moon/keystone/tests/unit/backend/core_ldap.py
+++ b/keystone-moon/keystone/tests/unit/backend/core_ldap.py
@@ -17,7 +17,6 @@ from oslo_config import cfg
from keystone.common import cache
from keystone.common import ldap as common_ldap
from keystone.common.ldap import core as common_ldap_core
-from keystone.common import sql
from keystone.tests import unit as tests
from keystone.tests.unit import default_fixtures
from keystone.tests.unit import fakeldap
@@ -57,19 +56,13 @@ class BaseBackendLdapCommon(object):
for shelf in fakeldap.FakeShelves:
fakeldap.FakeShelves[shelf].clear()
- def reload_backends(self, domain_id):
- # Only one backend unless we are using separate domain backends
- self.load_backends()
-
def get_config(self, domain_id):
# Only one conf structure unless we are using separate domain backends
return CONF
def config_overrides(self):
super(BaseBackendLdapCommon, self).config_overrides()
- self.config_fixture.config(
- group='identity',
- driver='keystone.identity.backends.ldap.Identity')
+ self.config_fixture.config(group='identity', driver='ldap')
def config_files(self):
config_files = super(BaseBackendLdapCommon, self).config_files()
@@ -116,17 +109,13 @@ class BaseBackendLdapIdentitySqlEverythingElse(tests.SQLDriverOverrides):
return config_files
def setUp(self):
- self.useFixture(database.Database())
+ sqldb = self.useFixture(database.Database())
super(BaseBackendLdapIdentitySqlEverythingElse, self).setUp()
self.clear_database()
self.load_backends()
cache.configure_cache_region(cache.REGION)
- self.engine = sql.get_engine()
- self.addCleanup(sql.cleanup)
-
- sql.ModelBase.metadata.create_all(bind=self.engine)
- self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
+ sqldb.recreate()
self.load_fixtures(default_fixtures)
# defaulted by the data load
self.user_foo['enabled'] = True
@@ -134,15 +123,9 @@ class BaseBackendLdapIdentitySqlEverythingElse(tests.SQLDriverOverrides):
def config_overrides(self):
super(BaseBackendLdapIdentitySqlEverythingElse,
self).config_overrides()
- self.config_fixture.config(
- group='identity',
- driver='keystone.identity.backends.ldap.Identity')
- self.config_fixture.config(
- group='resource',
- driver='keystone.resource.backends.sql.Resource')
- self.config_fixture.config(
- group='assignment',
- driver='keystone.assignment.backends.sql.Assignment')
+ self.config_fixture.config(group='identity', driver='ldap')
+ self.config_fixture.config(group='resource', driver='sql')
+ self.config_fixture.config(group='assignment', driver='sql')
class BaseBackendLdapIdentitySqlEverythingElseWithMapping(object):
diff --git a/keystone-moon/keystone/tests/unit/backend/domain_config/core.py b/keystone-moon/keystone/tests/unit/backend/domain_config/core.py
index da2e9bd9..c53d99b7 100644
--- a/keystone-moon/keystone/tests/unit/backend/domain_config/core.py
+++ b/keystone-moon/keystone/tests/unit/backend/domain_config/core.py
@@ -17,6 +17,7 @@ import mock
from testtools import matchers
from keystone import exception
+from keystone.tests import unit as tests
class DomainConfigTests(object):
@@ -521,3 +522,30 @@ class DomainConfigTests(object):
self.assertFalse(mock_log.warn.called)
# The escaping '%' should have been removed
self.assertEqual('my_url/%(password)s', res['ldap']['url'])
+
+ @tests.skip_if_cache_disabled('domain_config')
+ def test_cache_layer_get_sensitive_config(self):
+ config = {'ldap': {'url': uuid.uuid4().hex,
+ 'user_tree_dn': uuid.uuid4().hex,
+ 'password': uuid.uuid4().hex},
+ 'identity': {'driver': uuid.uuid4().hex}}
+ self.domain_config_api.create_config(self.domain['id'], config)
+ # cache the result
+ res = self.domain_config_api.get_config_with_sensitive_info(
+ self.domain['id'])
+ self.assertEqual(config, res)
+
+ # delete, bypassing domain config manager api
+ self.domain_config_api.delete_config_options(self.domain['id'])
+ self.domain_config_api.delete_config_options(self.domain['id'],
+ sensitive=True)
+
+ self.assertDictEqual(
+ res, self.domain_config_api.get_config_with_sensitive_info(
+ self.domain['id']))
+ self.domain_config_api.get_config_with_sensitive_info.invalidate(
+ self.domain_config_api, self.domain['id'])
+ self.assertDictEqual(
+ {},
+ self.domain_config_api.get_config_with_sensitive_info(
+ self.domain['id']))
diff --git a/keystone-moon/keystone/tests/unit/catalog/test_core.py b/keystone-moon/keystone/tests/unit/catalog/test_core.py
index 99a34280..2f334bb6 100644
--- a/keystone-moon/keystone/tests/unit/catalog/test_core.py
+++ b/keystone-moon/keystone/tests/unit/catalog/test_core.py
@@ -11,16 +11,16 @@
# under the License.
from oslo_config import cfg
-import testtools
from keystone.catalog import core
from keystone import exception
+from keystone.tests import unit
CONF = cfg.CONF
-class FormatUrlTests(testtools.TestCase):
+class FormatUrlTests(unit.BaseTestCase):
def test_successful_formatting(self):
url_template = ('http://$(public_bind_host)s:$(admin_port)d/'
@@ -72,3 +72,17 @@ class FormatUrlTests(testtools.TestCase):
core.format_url,
url_template,
values)
+
+ def test_substitution_with_allowed_keyerror(self):
+ # No value of 'tenant_id' is passed into url_template.
+ # mod: format_url will return None instead of raising
+ # "MalformedEndpoint" exception.
+ # This is intentional behavior since we don't want to skip
+        # all the later endpoints once there is a URL of an endpoint
+ # trying to replace 'tenant_id' with None.
+ url_template = ('http://$(public_bind_host)s:$(admin_port)d/'
+ '$(tenant_id)s/$(user_id)s')
+ values = {'public_bind_host': 'server', 'admin_port': 9090,
+ 'user_id': 'B'}
+ self.assertIsNone(core.format_url(url_template, values,
+ silent_keyerror_failures=['tenant_id']))
diff --git a/keystone-moon/keystone/tests/unit/common/test_connection_pool.py b/keystone-moon/keystone/tests/unit/common/test_connection_pool.py
index 74d0420c..3813e033 100644
--- a/keystone-moon/keystone/tests/unit/common/test_connection_pool.py
+++ b/keystone-moon/keystone/tests/unit/common/test_connection_pool.py
@@ -10,9 +10,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+import threading
import time
import mock
+import six
from six.moves import queue
import testtools
from testtools import matchers
@@ -117,3 +119,17 @@ class TestConnectionPool(core.TestCase):
# after it is available.
connection_pool.put_nowait(conn)
_acquire_connection()
+
+
+class TestMemcacheClientOverrides(core.BaseTestCase):
+
+ def test_client_stripped_of_threading_local(self):
+ """threading.local overrides are restored for _MemcacheClient"""
+ client_class = _memcache_pool._MemcacheClient
+ # get the genuine thread._local from MRO
+ thread_local = client_class.__mro__[2]
+ self.assertTrue(thread_local is threading.local)
+ for field in six.iterkeys(thread_local.__dict__):
+ if field not in ('__dict__', '__weakref__'):
+ self.assertNotEqual(id(getattr(thread_local, field, None)),
+ id(getattr(client_class, field, None)))
diff --git a/keystone-moon/keystone/tests/unit/common/test_injection.py b/keystone-moon/keystone/tests/unit/common/test_injection.py
index 86bb3c24..b4c23a84 100644
--- a/keystone-moon/keystone/tests/unit/common/test_injection.py
+++ b/keystone-moon/keystone/tests/unit/common/test_injection.py
@@ -21,6 +21,7 @@ from keystone.tests import unit as tests
class TestDependencyInjection(tests.BaseTestCase):
def setUp(self):
super(TestDependencyInjection, self).setUp()
+ dependency.reset()
self.addCleanup(dependency.reset)
def test_dependency_injection(self):
@@ -210,62 +211,6 @@ class TestDependencyInjection(tests.BaseTestCase):
self.assertFalse(dependency._REGISTRY)
- def test_optional_dependency_not_provided(self):
- requirement_name = uuid.uuid4().hex
-
- @dependency.optional(requirement_name)
- class C1(object):
- pass
-
- c1_inst = C1()
-
- dependency.resolve_future_dependencies()
-
- self.assertIsNone(getattr(c1_inst, requirement_name))
-
- def test_optional_dependency_provided(self):
- requirement_name = uuid.uuid4().hex
-
- @dependency.optional(requirement_name)
- class C1(object):
- pass
-
- @dependency.provider(requirement_name)
- class P1(object):
- pass
-
- c1_inst = C1()
- p1_inst = P1()
-
- dependency.resolve_future_dependencies()
-
- self.assertIs(getattr(c1_inst, requirement_name), p1_inst)
-
- def test_optional_and_required(self):
- p1_name = uuid.uuid4().hex
- p2_name = uuid.uuid4().hex
- optional_name = uuid.uuid4().hex
-
- @dependency.provider(p1_name)
- @dependency.requires(p2_name)
- @dependency.optional(optional_name)
- class P1(object):
- pass
-
- @dependency.provider(p2_name)
- @dependency.requires(p1_name)
- class P2(object):
- pass
-
- p1 = P1()
- p2 = P2()
-
- dependency.resolve_future_dependencies()
-
- self.assertIs(getattr(p1, p2_name), p2)
- self.assertIs(getattr(p2, p1_name), p1)
- self.assertIsNone(getattr(p1, optional_name))
-
def test_get_provider(self):
# Can get the instance of a provider using get_provider
diff --git a/keystone-moon/keystone/tests/unit/common/test_ldap.py b/keystone-moon/keystone/tests/unit/common/test_ldap.py
index 41568890..d3ce8cd2 100644
--- a/keystone-moon/keystone/tests/unit/common/test_ldap.py
+++ b/keystone-moon/keystone/tests/unit/common/test_ldap.py
@@ -11,23 +11,24 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
+import tempfile
import uuid
+import fixtures
import ldap.dn
import mock
from oslo_config import cfg
from testtools import matchers
-import os
-import shutil
-import tempfile
-
+from keystone.common import driver_hints
from keystone.common import ldap as ks_ldap
from keystone.common.ldap import core as common_ldap_core
from keystone.tests import unit as tests
from keystone.tests.unit import default_fixtures
from keystone.tests.unit import fakeldap
+
CONF = cfg.CONF
@@ -218,9 +219,7 @@ class LDAPDeleteTreeTest(tests.TestCase):
def config_overrides(self):
super(LDAPDeleteTreeTest, self).config_overrides()
- self.config_fixture.config(
- group='identity',
- driver='keystone.identity.backends.ldap.Identity')
+ self.config_fixture.config(group='identity', driver='ldap')
def config_files(self):
config_files = super(LDAPDeleteTreeTest, self).config_files()
@@ -311,8 +310,7 @@ class SslTlsTest(tests.TestCase):
def test_certdir_trust_tls(self):
# We need this to actually exist, so we create a tempdir.
- certdir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, certdir)
+ certdir = self.useFixture(fixtures.TempDir()).path
self.config_fixture.config(group='ldap',
url='ldap://localhost',
use_tls=True,
@@ -340,8 +338,7 @@ class SslTlsTest(tests.TestCase):
def test_certdir_trust_ldaps(self):
# We need this to actually exist, so we create a tempdir.
- certdir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, certdir)
+ certdir = self.useFixture(fixtures.TempDir()).path
self.config_fixture.config(group='ldap',
url='ldaps://localhost',
use_tls=False,
@@ -372,9 +369,7 @@ class LDAPPagedResultsTest(tests.TestCase):
def config_overrides(self):
super(LDAPPagedResultsTest, self).config_overrides()
- self.config_fixture.config(
- group='identity',
- driver='keystone.identity.backends.ldap.Identity')
+ self.config_fixture.config(group='identity', driver='ldap')
def config_files(self):
config_files = super(LDAPPagedResultsTest, self).config_files()
@@ -500,3 +495,68 @@ class CommonLdapTestCase(tests.BaseTestCase):
py_result = ks_ldap.convert_ldap_result(result)
# The user name should still be a string value.
self.assertEqual(user_name, py_result[0][1]['user_name'][0])
+
+
+class LDAPFilterQueryCompositionTest(tests.TestCase):
+ """These test cases test LDAP filter generation."""
+
+ def setUp(self):
+ super(LDAPFilterQueryCompositionTest, self).setUp()
+
+ self.base_ldap = ks_ldap.BaseLdap(self.config_fixture.conf)
+
+ # The tests need an attribute mapping to use.
+ self.attribute_name = uuid.uuid4().hex
+ self.filter_attribute_name = uuid.uuid4().hex
+ self.base_ldap.attribute_mapping = {
+ self.attribute_name: self.filter_attribute_name
+ }
+
+ def test_return_query_with_no_hints(self):
+ hints = driver_hints.Hints()
+ # NOTE: doesn't have to be a real query, we just need to make sure the
+ # same string is returned if there are no hints.
+ query = uuid.uuid4().hex
+ self.assertEqual(query,
+ self.base_ldap.filter_query(hints=hints, query=query))
+
+ # make sure the default query is an empty string
+ self.assertEqual('', self.base_ldap.filter_query(hints=hints))
+
+ def test_filter_with_empty_query_and_hints_set(self):
+ hints = driver_hints.Hints()
+ username = uuid.uuid4().hex
+ hints.add_filter(name=self.attribute_name,
+ value=username,
+ comparator='equals',
+ case_sensitive=False)
+ expected_ldap_filter = '(&(%s=%s))' % (
+ self.filter_attribute_name, username)
+ self.assertEqual(expected_ldap_filter,
+ self.base_ldap.filter_query(hints=hints))
+
+ def test_filter_with_both_query_and_hints_set(self):
+ hints = driver_hints.Hints()
+ # NOTE: doesn't have to be a real query, we just need to make sure the
+ # filter string is concatenated correctly
+ query = uuid.uuid4().hex
+ username = uuid.uuid4().hex
+ expected_result = '(&%(query)s(%(user_name_attr)s=%(username)s))' % (
+ {'query': query,
+ 'user_name_attr': self.filter_attribute_name,
+ 'username': username})
+ hints.add_filter(self.attribute_name, username)
+ self.assertEqual(expected_result,
+ self.base_ldap.filter_query(hints=hints, query=query))
+
+ def test_filter_with_hints_and_query_is_none(self):
+ hints = driver_hints.Hints()
+ username = uuid.uuid4().hex
+ hints.add_filter(name=self.attribute_name,
+ value=username,
+ comparator='equals',
+ case_sensitive=False)
+ expected_ldap_filter = '(&(%s=%s))' % (
+ self.filter_attribute_name, username)
+ self.assertEqual(expected_ldap_filter,
+ self.base_ldap.filter_query(hints=hints, query=None))
diff --git a/keystone-moon/keystone/tests/unit/common/test_notifications.py b/keystone-moon/keystone/tests/unit/common/test_notifications.py
index 55dd556d..2d872733 100644
--- a/keystone-moon/keystone/tests/unit/common/test_notifications.py
+++ b/keystone-moon/keystone/tests/unit/common/test_notifications.py
@@ -23,10 +23,9 @@ from pycadf import cadftaxonomy
from pycadf import cadftype
from pycadf import eventfactory
from pycadf import resource as cadfresource
-import testtools
-from keystone.common import dependency
from keystone import notifications
+from keystone.tests import unit
from keystone.tests.unit import test_v3
@@ -53,7 +52,7 @@ def register_callback(operation, resource_type=EXP_RESOURCE_TYPE):
return callback
-class AuditNotificationsTestCase(testtools.TestCase):
+class AuditNotificationsTestCase(unit.BaseTestCase):
def setUp(self):
super(AuditNotificationsTestCase, self).setUp()
self.config_fixture = self.useFixture(config_fixture.Config(CONF))
@@ -96,7 +95,7 @@ class AuditNotificationsTestCase(testtools.TestCase):
DISABLED_OPERATION)
-class NotificationsWrapperTestCase(testtools.TestCase):
+class NotificationsWrapperTestCase(unit.BaseTestCase):
def create_fake_ref(self):
resource_id = uuid.uuid4().hex
return resource_id, {
@@ -174,14 +173,7 @@ class NotificationsWrapperTestCase(testtools.TestCase):
self.assertFalse(callback.called)
-class NotificationsTestCase(testtools.TestCase):
- def setUp(self):
- super(NotificationsTestCase, self).setUp()
-
- # these should use self.config_fixture.config(), but they haven't
- # been registered yet
- CONF.rpc_backend = 'fake'
- CONF.notification_driver = ['fake']
+class NotificationsTestCase(unit.BaseTestCase):
def test_send_notification(self):
"""Test the private method _send_notification to ensure event_type,
@@ -324,7 +316,7 @@ class NotificationsForEntities(BaseNotificationTest):
def test_create_project(self):
project_ref = self.new_project_ref(domain_id=self.domain_id)
- self.assignment_api.create_project(project_ref['id'], project_ref)
+ self.resource_api.create_project(project_ref['id'], project_ref)
self._assert_last_note(
project_ref['id'], CREATED_OPERATION, 'project')
self._assert_last_audit(project_ref['id'], CREATED_OPERATION,
@@ -371,8 +363,8 @@ class NotificationsForEntities(BaseNotificationTest):
def test_delete_project(self):
project_ref = self.new_project_ref(domain_id=self.domain_id)
- self.assignment_api.create_project(project_ref['id'], project_ref)
- self.assignment_api.delete_project(project_ref['id'])
+ self.resource_api.create_project(project_ref['id'], project_ref)
+ self.resource_api.delete_project(project_ref['id'])
self._assert_last_note(
project_ref['id'], DELETED_OPERATION, 'project')
self._assert_last_audit(project_ref['id'], DELETED_OPERATION,
@@ -403,19 +395,19 @@ class NotificationsForEntities(BaseNotificationTest):
def test_update_domain(self):
domain_ref = self.new_domain_ref()
- self.assignment_api.create_domain(domain_ref['id'], domain_ref)
+ self.resource_api.create_domain(domain_ref['id'], domain_ref)
domain_ref['description'] = uuid.uuid4().hex
- self.assignment_api.update_domain(domain_ref['id'], domain_ref)
+ self.resource_api.update_domain(domain_ref['id'], domain_ref)
self._assert_last_note(domain_ref['id'], UPDATED_OPERATION, 'domain')
self._assert_last_audit(domain_ref['id'], UPDATED_OPERATION, 'domain',
cadftaxonomy.SECURITY_DOMAIN)
def test_delete_domain(self):
domain_ref = self.new_domain_ref()
- self.assignment_api.create_domain(domain_ref['id'], domain_ref)
+ self.resource_api.create_domain(domain_ref['id'], domain_ref)
domain_ref['enabled'] = False
- self.assignment_api.update_domain(domain_ref['id'], domain_ref)
- self.assignment_api.delete_domain(domain_ref['id'])
+ self.resource_api.update_domain(domain_ref['id'], domain_ref)
+ self.resource_api.delete_domain(domain_ref['id'])
self._assert_last_note(domain_ref['id'], DELETED_OPERATION, 'domain')
self._assert_last_audit(domain_ref['id'], DELETED_OPERATION, 'domain',
cadftaxonomy.SECURITY_DOMAIN)
@@ -542,19 +534,19 @@ class NotificationsForEntities(BaseNotificationTest):
def test_disable_domain(self):
domain_ref = self.new_domain_ref()
- self.assignment_api.create_domain(domain_ref['id'], domain_ref)
+ self.resource_api.create_domain(domain_ref['id'], domain_ref)
domain_ref['enabled'] = False
- self.assignment_api.update_domain(domain_ref['id'], domain_ref)
+ self.resource_api.update_domain(domain_ref['id'], domain_ref)
self._assert_notify_sent(domain_ref['id'], 'disabled', 'domain',
public=False)
def test_disable_of_disabled_domain_does_not_notify(self):
domain_ref = self.new_domain_ref()
domain_ref['enabled'] = False
- self.assignment_api.create_domain(domain_ref['id'], domain_ref)
+ self.resource_api.create_domain(domain_ref['id'], domain_ref)
# The domain_ref above is not changed during the create process. We
# can use the same ref to perform the update.
- self.assignment_api.update_domain(domain_ref['id'], domain_ref)
+ self.resource_api.update_domain(domain_ref['id'], domain_ref)
self._assert_notify_not_sent(domain_ref['id'], 'disabled', 'domain',
public=False)
@@ -568,8 +560,8 @@ class NotificationsForEntities(BaseNotificationTest):
def test_update_project(self):
project_ref = self.new_project_ref(domain_id=self.domain_id)
- self.assignment_api.create_project(project_ref['id'], project_ref)
- self.assignment_api.update_project(project_ref['id'], project_ref)
+ self.resource_api.create_project(project_ref['id'], project_ref)
+ self.resource_api.update_project(project_ref['id'], project_ref)
self._assert_notify_sent(
project_ref['id'], UPDATED_OPERATION, 'project', public=True)
self._assert_last_audit(project_ref['id'], UPDATED_OPERATION,
@@ -577,27 +569,27 @@ class NotificationsForEntities(BaseNotificationTest):
def test_disable_project(self):
project_ref = self.new_project_ref(domain_id=self.domain_id)
- self.assignment_api.create_project(project_ref['id'], project_ref)
+ self.resource_api.create_project(project_ref['id'], project_ref)
project_ref['enabled'] = False
- self.assignment_api.update_project(project_ref['id'], project_ref)
+ self.resource_api.update_project(project_ref['id'], project_ref)
self._assert_notify_sent(project_ref['id'], 'disabled', 'project',
public=False)
def test_disable_of_disabled_project_does_not_notify(self):
project_ref = self.new_project_ref(domain_id=self.domain_id)
project_ref['enabled'] = False
- self.assignment_api.create_project(project_ref['id'], project_ref)
+ self.resource_api.create_project(project_ref['id'], project_ref)
# The project_ref above is not changed during the create process. We
# can use the same ref to perform the update.
- self.assignment_api.update_project(project_ref['id'], project_ref)
+ self.resource_api.update_project(project_ref['id'], project_ref)
self._assert_notify_not_sent(project_ref['id'], 'disabled', 'project',
public=False)
def test_update_project_does_not_send_disable(self):
project_ref = self.new_project_ref(domain_id=self.domain_id)
- self.assignment_api.create_project(project_ref['id'], project_ref)
+ self.resource_api.create_project(project_ref['id'], project_ref)
project_ref['enabled'] = True
- self.assignment_api.update_project(project_ref['id'], project_ref)
+ self.resource_api.update_project(project_ref['id'], project_ref)
self._assert_last_note(
project_ref['id'], UPDATED_OPERATION, 'project')
self._assert_notify_not_sent(project_ref['id'], 'disabled', 'project')
@@ -665,7 +657,7 @@ class TestEventCallbacks(test_v3.RestfulTestCase):
def test_notification_received(self):
callback = register_callback(CREATED_OPERATION, 'project')
project_ref = self.new_project_ref(domain_id=self.domain_id)
- self.assignment_api.create_project(project_ref['id'], project_ref)
+ self.resource_api.create_project(project_ref['id'], project_ref)
self.assertTrue(callback.called)
def test_notification_method_not_callable(self):
@@ -694,14 +686,14 @@ class TestEventCallbacks(test_v3.RestfulTestCase):
resource_type,
self._project_deleted_callback)
- def test_provider_event_callbacks_subscription(self):
+ def test_provider_event_callback_subscription(self):
callback_called = []
- @dependency.provider('foo_api')
+ @notifications.listener
class Foo(object):
def __init__(self):
self.event_callbacks = {
- CREATED_OPERATION: {'project': [self.foo_callback]}}
+ CREATED_OPERATION: {'project': self.foo_callback}}
def foo_callback(self, service, resource_type, operation,
payload):
@@ -710,24 +702,73 @@ class TestEventCallbacks(test_v3.RestfulTestCase):
Foo()
project_ref = self.new_project_ref(domain_id=self.domain_id)
- self.assignment_api.create_project(project_ref['id'], project_ref)
+ self.resource_api.create_project(project_ref['id'], project_ref)
self.assertEqual([True], callback_called)
+ def test_provider_event_callbacks_subscription(self):
+ callback_called = []
+
+ @notifications.listener
+ class Foo(object):
+ def __init__(self):
+ self.event_callbacks = {
+ CREATED_OPERATION: {
+ 'project': [self.callback_0, self.callback_1]}}
+
+ def callback_0(self, service, resource_type, operation, payload):
+ # uses callback_called from the closure
+ callback_called.append('cb0')
+
+ def callback_1(self, service, resource_type, operation, payload):
+ # uses callback_called from the closure
+ callback_called.append('cb1')
+
+ Foo()
+ project_ref = self.new_project_ref(domain_id=self.domain_id)
+ self.resource_api.create_project(project_ref['id'], project_ref)
+ self.assertItemsEqual(['cb1', 'cb0'], callback_called)
+
def test_invalid_event_callbacks(self):
- @dependency.provider('foo_api')
+ @notifications.listener
class Foo(object):
def __init__(self):
self.event_callbacks = 'bogus'
- self.assertRaises(ValueError, Foo)
+ self.assertRaises(AttributeError, Foo)
def test_invalid_event_callbacks_event(self):
- @dependency.provider('foo_api')
+ @notifications.listener
class Foo(object):
def __init__(self):
self.event_callbacks = {CREATED_OPERATION: 'bogus'}
- self.assertRaises(ValueError, Foo)
+ self.assertRaises(AttributeError, Foo)
+
+ def test_using_an_unbound_method_as_a_callback_fails(self):
+ # NOTE(dstanek): An unbound method is when you reference a method
+ # from a class object. You'll get a method that isn't bound to a
+ # particular instance so there is no magic 'self'. You can call it,
+ # but you have to pass in the instance manually like: C.m(C()).
+ # If you reference the method from an instance then you get a method
+ # that effectively curries the self argument for you
+        # (think functools.partial). Obviously if we don't have an
+ # instance then we can't call the method.
+ @notifications.listener
+ class Foo(object):
+ def __init__(self):
+ self.event_callbacks = {CREATED_OPERATION:
+ {'project': Foo.callback}}
+
+ def callback(self, *args):
+ pass
+
+ # TODO(dstanek): it would probably be nice to fail early using
+ # something like:
+ # self.assertRaises(TypeError, Foo)
+ Foo()
+ project_ref = self.new_project_ref(domain_id=self.domain_id)
+ self.assertRaises(TypeError, self.resource_api.create_project,
+ project_ref['id'], project_ref)
class CadfNotificationsWrapperTestCase(test_v3.RestfulTestCase):
@@ -759,13 +800,14 @@ class CadfNotificationsWrapperTestCase(test_v3.RestfulTestCase):
'action': action,
'initiator': initiator,
'event': event,
+ 'event_type': event_type,
'send_notification_called': True}
self._notifications.append(note)
self.useFixture(mockpatch.PatchObject(
notifications, '_send_audit_notification', fake_notify))
- def _assert_last_note(self, action, user_id):
+ def _assert_last_note(self, action, user_id, event_type=None):
self.assertTrue(self._notifications)
note = self._notifications[-1]
self.assertEqual(note['action'], action)
@@ -773,6 +815,8 @@ class CadfNotificationsWrapperTestCase(test_v3.RestfulTestCase):
self.assertEqual(initiator.id, user_id)
self.assertEqual(initiator.host.address, self.LOCAL_HOST)
self.assertTrue(note['send_notification_called'])
+ if event_type:
+ self.assertEqual(note['event_type'], event_type)
def _assert_event(self, role_id, project=None, domain=None,
user=None, group=None, inherit=False):
@@ -816,10 +860,10 @@ class CadfNotificationsWrapperTestCase(test_v3.RestfulTestCase):
self.assertEqual(project, event.project)
if domain:
self.assertEqual(domain, event.domain)
- if user:
- self.assertEqual(user, event.user)
if group:
self.assertEqual(group, event.group)
+ elif user:
+ self.assertEqual(user, event.user)
self.assertEqual(role_id, event.role)
self.assertEqual(inherit, event.inherited_to_projects)
@@ -857,12 +901,16 @@ class CadfNotificationsWrapperTestCase(test_v3.RestfulTestCase):
user=None, group=None):
self.put(url)
action = "%s.%s" % (CREATED_OPERATION, self.ROLE_ASSIGNMENT)
- self._assert_last_note(action, self.user_id)
+ event_type = '%s.%s.%s' % (notifications.SERVICE,
+ self.ROLE_ASSIGNMENT, CREATED_OPERATION)
+ self._assert_last_note(action, self.user_id, event_type)
self._assert_event(role, project, domain, user, group)
self.delete(url)
action = "%s.%s" % (DELETED_OPERATION, self.ROLE_ASSIGNMENT)
- self._assert_last_note(action, self.user_id)
- self._assert_event(role, project, domain, user, group)
+ event_type = '%s.%s.%s' % (notifications.SERVICE,
+ self.ROLE_ASSIGNMENT, DELETED_OPERATION)
+ self._assert_last_note(action, self.user_id, event_type)
+ self._assert_event(role, project, domain, user, None)
def test_user_project_grant(self):
url = ('/projects/%s/users/%s/roles/%s' %
@@ -874,14 +922,50 @@ class CadfNotificationsWrapperTestCase(test_v3.RestfulTestCase):
def test_group_domain_grant(self):
group_ref = self.new_group_ref(domain_id=self.domain_id)
group = self.identity_api.create_group(group_ref)
+ self.identity_api.add_user_to_group(self.user_id, group['id'])
url = ('/domains/%s/groups/%s/roles/%s' %
(self.domain_id, group['id'], self.role_id))
self._test_role_assignment(url, self.role_id,
domain=self.domain_id,
+ user=self.user_id,
group=group['id'])
+ def test_add_role_to_user_and_project(self):
+ # A notification is sent when add_role_to_user_and_project is called on
+ # the assignment manager.
+
+ project_ref = self.new_project_ref(self.domain_id)
+ project = self.resource_api.create_project(
+ project_ref['id'], project_ref)
+ tenant_id = project['id']
+
+ self.assignment_api.add_role_to_user_and_project(
+ self.user_id, tenant_id, self.role_id)
+
+ self.assertTrue(self._notifications)
+ note = self._notifications[-1]
+ self.assertEqual(note['action'], 'created.role_assignment')
+ self.assertTrue(note['send_notification_called'])
+
+ self._assert_event(self.role_id, project=tenant_id, user=self.user_id)
+
+ def test_remove_role_from_user_and_project(self):
+ # A notification is sent when remove_role_from_user_and_project is
+ # called on the assignment manager.
+
+ self.assignment_api.remove_role_from_user_and_project(
+ self.user_id, self.project_id, self.role_id)
+
+ self.assertTrue(self._notifications)
+ note = self._notifications[-1]
+ self.assertEqual(note['action'], 'deleted.role_assignment')
+ self.assertTrue(note['send_notification_called'])
+
+ self._assert_event(self.role_id, project=self.project_id,
+ user=self.user_id)
+
-class TestCallbackRegistration(testtools.TestCase):
+class TestCallbackRegistration(unit.BaseTestCase):
def setUp(self):
super(TestCallbackRegistration, self).setUp()
self.mock_log = mock.Mock()
diff --git a/keystone-moon/keystone/tests/unit/common/test_utils.py b/keystone-moon/keystone/tests/unit/common/test_utils.py
index 184c8141..e8bac3c0 100644
--- a/keystone-moon/keystone/tests/unit/common/test_utils.py
+++ b/keystone-moon/keystone/tests/unit/common/test_utils.py
@@ -150,7 +150,7 @@ class UtilsTestCase(tests.BaseTestCase):
def test_pki_encoder(self):
data = {'field': 'value'}
json = jsonutils.dumps(data, cls=common_utils.PKIEncoder)
- expected_json = b'{"field":"value"}'
+ expected_json = '{"field":"value"}'
self.assertEqual(expected_json, json)
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_ldap_sql.conf b/keystone-moon/keystone/tests/unit/config_files/backend_ldap_sql.conf
index 8a06f2f9..2097b68b 100644
--- a/keystone-moon/keystone/tests/unit/config_files/backend_ldap_sql.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/backend_ldap_sql.conf
@@ -2,7 +2,7 @@
#For a specific location file based sqlite use:
#connection = sqlite:////tmp/keystone.db
#To Test MySQL:
-#connection = mysql://keystone:keystone@localhost/keystone?charset=utf8
+#connection = mysql+pymysql://keystone:keystone@localhost/keystone?charset=utf8
#To Test PostgreSQL:
#connection = postgresql://keystone:keystone@localhost/keystone?client_encoding=utf8
idle_timeout = 200
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_multi_ldap_sql.conf b/keystone-moon/keystone/tests/unit/config_files/backend_multi_ldap_sql.conf
index 2d04d83d..5185770b 100644
--- a/keystone-moon/keystone/tests/unit/config_files/backend_multi_ldap_sql.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/backend_multi_ldap_sql.conf
@@ -3,7 +3,7 @@ connection = sqlite://
#For a file based sqlite use
#connection = sqlite:////tmp/keystone.db
#To Test MySQL:
-#connection = mysql://keystone:keystone@localhost/keystone?charset=utf8
+#connection = mysql+pymysql://keystone:keystone@localhost/keystone?charset=utf8
#To Test PostgreSQL:
#connection = postgresql://keystone:keystone@localhost/keystone?client_encoding=utf8
idle_timeout = 200
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_mysql.conf b/keystone-moon/keystone/tests/unit/config_files/backend_mysql.conf
index d612f729..142ca203 100644
--- a/keystone-moon/keystone/tests/unit/config_files/backend_mysql.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/backend_mysql.conf
@@ -1,4 +1,4 @@
#Used for running the Migrate tests against a live Mysql Server
#See _sql_livetest.py
[database]
-connection = mysql://keystone:keystone@localhost/keystone_test?charset=utf8
+connection = mysql+pymysql://keystone:keystone@localhost/keystone_test?charset=utf8
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_sql.conf b/keystone-moon/keystone/tests/unit/config_files/backend_sql.conf
index 9d401af3..063177bd 100644
--- a/keystone-moon/keystone/tests/unit/config_files/backend_sql.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/backend_sql.conf
@@ -2,7 +2,7 @@
#For a specific location file based sqlite use:
#connection = sqlite:////tmp/keystone.db
#To Test MySQL:
-#connection = mysql://keystone:keystone@localhost/keystone?charset=utf8
+#connection = mysql+pymysql://keystone:keystone@localhost/keystone?charset=utf8
#To Test PostgreSQL:
#connection = postgresql://keystone:keystone@localhost/keystone?client_encoding=utf8
idle_timeout = 200
diff --git a/keystone-moon/keystone/tests/unit/config_files/domain_configs_default_ldap_one_sql/keystone.domain1.conf b/keystone-moon/keystone/tests/unit/config_files/domain_configs_default_ldap_one_sql/keystone.domain1.conf
index a4492a67..fecc7bea 100644
--- a/keystone-moon/keystone/tests/unit/config_files/domain_configs_default_ldap_one_sql/keystone.domain1.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/domain_configs_default_ldap_one_sql/keystone.domain1.conf
@@ -2,4 +2,4 @@
# 'domain1' for use with unit tests.
[identity]
-driver = keystone.identity.backends.sql.Identity \ No newline at end of file
+driver = sql \ No newline at end of file
diff --git a/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.Default.conf b/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.Default.conf
index 7049afed..2dd86c25 100644
--- a/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.Default.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.Default.conf
@@ -11,4 +11,4 @@ password = password
suffix = cn=example,cn=com
[identity]
-driver = keystone.identity.backends.ldap.Identity \ No newline at end of file
+driver = ldap \ No newline at end of file
diff --git a/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain1.conf b/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain1.conf
index 6b7e2488..ba22cdf9 100644
--- a/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain1.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain1.conf
@@ -8,4 +8,4 @@ password = password
suffix = cn=example,cn=com
[identity]
-driver = keystone.identity.backends.ldap.Identity \ No newline at end of file
+driver = ldap \ No newline at end of file
diff --git a/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain2.conf b/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain2.conf
index 0ed68eb9..a14179e3 100644
--- a/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain2.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain2.conf
@@ -10,4 +10,4 @@ group_tree_dn = ou=UserGroups,dc=myroot,dc=org
user_tree_dn = ou=Users,dc=myroot,dc=org
[identity]
-driver = keystone.identity.backends.ldap.Identity \ No newline at end of file
+driver = ldap \ No newline at end of file
diff --git a/keystone-moon/keystone/tests/unit/config_files/domain_configs_one_extra_sql/keystone.domain2.conf b/keystone-moon/keystone/tests/unit/config_files/domain_configs_one_extra_sql/keystone.domain2.conf
index 81b44462..925b26f2 100644
--- a/keystone-moon/keystone/tests/unit/config_files/domain_configs_one_extra_sql/keystone.domain2.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/domain_configs_one_extra_sql/keystone.domain2.conf
@@ -2,4 +2,4 @@
# 'domain2' for use with unit tests.
[identity]
-driver = keystone.identity.backends.sql.Identity \ No newline at end of file
+driver = sql \ No newline at end of file
diff --git a/keystone-moon/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.Default.conf b/keystone-moon/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.Default.conf
index 7049afed..2dd86c25 100644
--- a/keystone-moon/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.Default.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.Default.conf
@@ -11,4 +11,4 @@ password = password
suffix = cn=example,cn=com
[identity]
-driver = keystone.identity.backends.ldap.Identity \ No newline at end of file
+driver = ldap \ No newline at end of file
diff --git a/keystone-moon/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.domain1.conf b/keystone-moon/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.domain1.conf
index a4492a67..fecc7bea 100644
--- a/keystone-moon/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.domain1.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.domain1.conf
@@ -2,4 +2,4 @@
# 'domain1' for use with unit tests.
[identity]
-driver = keystone.identity.backends.sql.Identity \ No newline at end of file
+driver = sql \ No newline at end of file
diff --git a/keystone-moon/keystone/tests/unit/config_files/test_auth_plugin.conf b/keystone-moon/keystone/tests/unit/config_files/test_auth_plugin.conf
index abcc43ba..4a9e87d5 100644
--- a/keystone-moon/keystone/tests/unit/config_files/test_auth_plugin.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/test_auth_plugin.conf
@@ -1,7 +1,4 @@
[auth]
methods = external,password,token,simple_challenge_response,saml2,openid,x509
simple_challenge_response = keystone.tests.unit.test_auth_plugin.SimpleChallengeResponse
-saml2 = keystone.auth.plugins.mapped.Mapped
-openid = keystone.auth.plugins.mapped.Mapped
-x509 = keystone.auth.plugins.mapped.Mapped
diff --git a/keystone-moon/keystone/tests/unit/contrib/__init__.py b/keystone-moon/keystone/tests/unit/contrib/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/contrib/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/contrib/federation/__init__.py b/keystone-moon/keystone/tests/unit/contrib/federation/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/contrib/federation/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/contrib/federation/test_utils.py b/keystone-moon/keystone/tests/unit/contrib/federation/test_utils.py
new file mode 100644
index 00000000..a8b4ae76
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/contrib/federation/test_utils.py
@@ -0,0 +1,611 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from keystone.auth.plugins import mapped
+from keystone.contrib.federation import utils as mapping_utils
+from keystone import exception
+from keystone.tests import unit
+from keystone.tests.unit import mapping_fixtures
+
+
+class MappingRuleEngineTests(unit.BaseTestCase):
+ """A class for testing the mapping rule engine."""
+
+ def assertValidMappedUserObject(self, mapped_properties,
+ user_type='ephemeral',
+ domain_id=None):
+ """Check whether mapped properties object has 'user' within.
+
+ According to today's rules, RuleProcessor does not have to issue user's
+ id or name. What's actually required is user's type and for ephemeral
+ users that would be service domain named 'Federated'.
+ """
+ self.assertIn('user', mapped_properties,
+ message='Missing user object in mapped properties')
+ user = mapped_properties['user']
+ self.assertIn('type', user)
+ self.assertEqual(user_type, user['type'])
+ self.assertIn('domain', user)
+ domain = user['domain']
+ domain_name_or_id = domain.get('id') or domain.get('name')
+ domain_ref = domain_id or 'Federated'
+ self.assertEqual(domain_ref, domain_name_or_id)
+
+ def test_rule_engine_any_one_of_and_direct_mapping(self):
+ """Should return user's name and group id EMPLOYEE_GROUP_ID.
+
+ The ADMIN_ASSERTION should successfully have a match in MAPPING_LARGE.
+ They will test the case where `any_one_of` is valid, and there is
+ a direct mapping for the users name.
+
+ """
+
+ mapping = mapping_fixtures.MAPPING_LARGE
+ assertion = mapping_fixtures.ADMIN_ASSERTION
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ values = rp.process(assertion)
+
+ fn = assertion.get('FirstName')
+ ln = assertion.get('LastName')
+ full_name = '%s %s' % (fn, ln)
+ group_ids = values.get('group_ids')
+ user_name = values.get('user', {}).get('name')
+
+ self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)
+ self.assertEqual(full_name, user_name)
+
+ def test_rule_engine_no_regex_match(self):
+ """Should deny authorization, the email of the tester won't match.
+
+ This will not match since the email in the assertion will fail
+ the regex test. It is set to match any @example.com address.
+ But the incoming value is set to eviltester@example.org.
+ RuleProcessor should return list of empty group_ids.
+
+ """
+
+ mapping = mapping_fixtures.MAPPING_LARGE
+ assertion = mapping_fixtures.BAD_TESTER_ASSERTION
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ mapped_properties = rp.process(assertion)
+
+ self.assertValidMappedUserObject(mapped_properties)
+ self.assertIsNone(mapped_properties['user'].get('name'))
+ self.assertListEqual(list(), mapped_properties['group_ids'])
+
+ def test_rule_engine_regex_many_groups(self):
+ """Should return group CONTRACTOR_GROUP_ID.
+
+ The TESTER_ASSERTION should successfully have a match in
+ MAPPING_TESTER_REGEX. This will test the case where many groups
+ are in the assertion, and a regex value is used to try and find
+ a match.
+
+ """
+
+ mapping = mapping_fixtures.MAPPING_TESTER_REGEX
+ assertion = mapping_fixtures.TESTER_ASSERTION
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ values = rp.process(assertion)
+
+ self.assertValidMappedUserObject(values)
+ user_name = assertion.get('UserName')
+ group_ids = values.get('group_ids')
+ name = values.get('user', {}).get('name')
+
+ self.assertEqual(user_name, name)
+ self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids)
+
+ def test_rule_engine_any_one_of_many_rules(self):
+ """Should return group CONTRACTOR_GROUP_ID.
+
+ The CONTRACTOR_ASSERTION should successfully have a match in
+ MAPPING_SMALL. This will test the case where many rules
+ must be matched, including an `any_one_of`, and a direct
+ mapping.
+
+ """
+
+ mapping = mapping_fixtures.MAPPING_SMALL
+ assertion = mapping_fixtures.CONTRACTOR_ASSERTION
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ values = rp.process(assertion)
+
+ self.assertValidMappedUserObject(values)
+ user_name = assertion.get('UserName')
+ group_ids = values.get('group_ids')
+ name = values.get('user', {}).get('name')
+
+ self.assertEqual(user_name, name)
+ self.assertIn(mapping_fixtures.CONTRACTOR_GROUP_ID, group_ids)
+
+ def test_rule_engine_not_any_of_and_direct_mapping(self):
+ """Should return user's name and email.
+
+ The CUSTOMER_ASSERTION should successfully have a match in
+ MAPPING_LARGE. This will test the case where a requirement
+ has `not_any_of`, and direct mapping to a username, no group.
+
+ """
+
+ mapping = mapping_fixtures.MAPPING_LARGE
+ assertion = mapping_fixtures.CUSTOMER_ASSERTION
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ values = rp.process(assertion)
+
+ self.assertValidMappedUserObject(values)
+ user_name = assertion.get('UserName')
+ group_ids = values.get('group_ids')
+ name = values.get('user', {}).get('name')
+
+ self.assertEqual(user_name, name)
+ self.assertEqual([], group_ids,)
+
+ def test_rule_engine_not_any_of_many_rules(self):
+ """Should return group EMPLOYEE_GROUP_ID.
+
+ The EMPLOYEE_ASSERTION should successfully have a match in
+ MAPPING_SMALL. This will test the case where many remote
+ rules must be matched, including a `not_any_of`.
+
+ """
+
+ mapping = mapping_fixtures.MAPPING_SMALL
+ assertion = mapping_fixtures.EMPLOYEE_ASSERTION
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ values = rp.process(assertion)
+
+ self.assertValidMappedUserObject(values)
+ user_name = assertion.get('UserName')
+ group_ids = values.get('group_ids')
+ name = values.get('user', {}).get('name')
+
+ self.assertEqual(user_name, name)
+ self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)
+
+ def test_rule_engine_not_any_of_regex_verify_pass(self):
+ """Should return group DEVELOPER_GROUP_ID.
+
+ The DEVELOPER_ASSERTION should successfully have a match in
+ MAPPING_DEVELOPER_REGEX. This will test the case where many
+ remote rules must be matched, including a `not_any_of`, with
+ regex set to True.
+
+ """
+
+ mapping = mapping_fixtures.MAPPING_DEVELOPER_REGEX
+ assertion = mapping_fixtures.DEVELOPER_ASSERTION
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ values = rp.process(assertion)
+
+ self.assertValidMappedUserObject(values)
+ user_name = assertion.get('UserName')
+ group_ids = values.get('group_ids')
+ name = values.get('user', {}).get('name')
+
+ self.assertEqual(user_name, name)
+ self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids)
+
+ def test_rule_engine_not_any_of_regex_verify_fail(self):
+ """Should deny authorization.
+
+ The email in the assertion will fail the regex test.
+ It is set to reject any @example.org address, but the
+ incoming value is set to evildeveloper@example.org.
+ RuleProcessor should return list of empty group_ids.
+
+ """
+
+ mapping = mapping_fixtures.MAPPING_DEVELOPER_REGEX
+ assertion = mapping_fixtures.BAD_DEVELOPER_ASSERTION
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ mapped_properties = rp.process(assertion)
+
+ self.assertValidMappedUserObject(mapped_properties)
+ self.assertIsNone(mapped_properties['user'].get('name'))
+ self.assertListEqual(list(), mapped_properties['group_ids'])
+
+ def _rule_engine_regex_match_and_many_groups(self, assertion):
+ """Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.
+
+ A helper function injecting assertion passed as an argument.
+ Expect DEVELOPER_GROUP_ID and TESTER_GROUP_ID in the results.
+
+ """
+
+ mapping = mapping_fixtures.MAPPING_LARGE
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ values = rp.process(assertion)
+
+ user_name = assertion.get('UserName')
+ group_ids = values.get('group_ids')
+ name = values.get('user', {}).get('name')
+
+ self.assertValidMappedUserObject(values)
+ self.assertEqual(user_name, name)
+ self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids)
+ self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids)
+
+ def test_rule_engine_regex_match_and_many_groups(self):
+ """Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.
+
+ The TESTER_ASSERTION should successfully have a match in
+ MAPPING_LARGE. This will test a successful regex match
+ for an `any_one_of` evaluation type, and will have many
+ groups returned.
+
+ """
+ self._rule_engine_regex_match_and_many_groups(
+ mapping_fixtures.TESTER_ASSERTION)
+
+ def test_rule_engine_discards_nonstring_objects(self):
+ """Check whether RuleProcessor discards non string objects.
+
+ Despite the fact that assertion is malformed and contains
+ non string objects, RuleProcessor should correctly discard them and
+ successfully have a match in MAPPING_LARGE.
+
+ """
+ self._rule_engine_regex_match_and_many_groups(
+ mapping_fixtures.MALFORMED_TESTER_ASSERTION)
+
+ def test_rule_engine_fails_after_discarding_nonstring(self):
+ """Check whether RuleProcessor discards non string objects.
+
+ Expect RuleProcessor to discard non string object, which
+ is required for a correct rule match. RuleProcessor will result with
+ empty list of groups.
+
+ """
+ mapping = mapping_fixtures.MAPPING_SMALL
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ assertion = mapping_fixtures.CONTRACTOR_MALFORMED_ASSERTION
+ mapped_properties = rp.process(assertion)
+ self.assertValidMappedUserObject(mapped_properties)
+ self.assertIsNone(mapped_properties['user'].get('name'))
+ self.assertListEqual(list(), mapped_properties['group_ids'])
+
+ def test_rule_engine_returns_group_names(self):
+ """Check whether RuleProcessor returns group names with their domains.
+
+ RuleProcessor should return 'group_names' entry with a list of
+ dictionaries with two entries 'name' and 'domain' identifying group by
+ its name and domain.
+
+ """
+ mapping = mapping_fixtures.MAPPING_GROUP_NAMES
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ assertion = mapping_fixtures.EMPLOYEE_ASSERTION
+ mapped_properties = rp.process(assertion)
+ self.assertIsNotNone(mapped_properties)
+ self.assertValidMappedUserObject(mapped_properties)
+ reference = {
+ mapping_fixtures.DEVELOPER_GROUP_NAME:
+ {
+ "name": mapping_fixtures.DEVELOPER_GROUP_NAME,
+ "domain": {
+ "name": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_NAME
+ }
+ },
+ mapping_fixtures.TESTER_GROUP_NAME:
+ {
+ "name": mapping_fixtures.TESTER_GROUP_NAME,
+ "domain": {
+ "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
+ }
+ }
+ }
+ for rule in mapped_properties['group_names']:
+ self.assertDictEqual(reference.get(rule.get('name')), rule)
+
+ def test_rule_engine_whitelist_and_direct_groups_mapping(self):
+ """Should return user's groups Developer and Contractor.
+
+ The EMPLOYEE_ASSERTION_MULTIPLE_GROUPS should successfully have a match
+ in MAPPING_GROUPS_WHITELIST. It will test the case where 'whitelist'
+ correctly filters out Manager and only allows Developer and Contractor.
+
+ """
+
+ mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST
+ assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ mapped_properties = rp.process(assertion)
+ self.assertIsNotNone(mapped_properties)
+
+ reference = {
+ mapping_fixtures.DEVELOPER_GROUP_NAME:
+ {
+ "name": mapping_fixtures.DEVELOPER_GROUP_NAME,
+ "domain": {
+ "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
+ }
+ },
+ mapping_fixtures.CONTRACTOR_GROUP_NAME:
+ {
+ "name": mapping_fixtures.CONTRACTOR_GROUP_NAME,
+ "domain": {
+ "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
+ }
+ }
+ }
+ for rule in mapped_properties['group_names']:
+ self.assertDictEqual(reference.get(rule.get('name')), rule)
+
+ self.assertEqual('tbo', mapped_properties['user']['name'])
+ self.assertEqual([], mapped_properties['group_ids'])
+
+ def test_rule_engine_blacklist_and_direct_groups_mapping(self):
+ """Should return user's group Developer.
+
+ The EMPLOYEE_ASSERTION_MULTIPLE_GROUPS should successfully have a match
+ in MAPPING_GROUPS_BLACKLIST. It will test the case where 'blacklist'
+ correctly filters out Manager and Developer and only allows Contractor.
+
+ """
+
+ mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST
+ assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ mapped_properties = rp.process(assertion)
+ self.assertIsNotNone(mapped_properties)
+
+ reference = {
+ mapping_fixtures.CONTRACTOR_GROUP_NAME:
+ {
+ "name": mapping_fixtures.CONTRACTOR_GROUP_NAME,
+ "domain": {
+ "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
+ }
+ }
+ }
+ for rule in mapped_properties['group_names']:
+ self.assertDictEqual(reference.get(rule.get('name')), rule)
+ self.assertEqual('tbo', mapped_properties['user']['name'])
+ self.assertEqual([], mapped_properties['group_ids'])
+
+ def test_rule_engine_blacklist_and_direct_groups_mapping_multiples(self):
+ """Tests matching multiple values before the blacklist.
+
+ Verifies that the local indexes are correct when matching multiple
+ remote values for a field when the field occurs before the blacklist
+ entry in the remote rules.
+
+ """
+
+ mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST_MULTIPLES
+ assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ mapped_properties = rp.process(assertion)
+ self.assertIsNotNone(mapped_properties)
+
+ reference = {
+ mapping_fixtures.CONTRACTOR_GROUP_NAME:
+ {
+ "name": mapping_fixtures.CONTRACTOR_GROUP_NAME,
+ "domain": {
+ "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
+ }
+ }
+ }
+ for rule in mapped_properties['group_names']:
+ self.assertDictEqual(reference.get(rule.get('name')), rule)
+ self.assertEqual('tbo', mapped_properties['user']['name'])
+ self.assertEqual([], mapped_properties['group_ids'])
+
+ def test_rule_engine_whitelist_direct_group_mapping_missing_domain(self):
+ """Test if the local rule is rejected upon missing domain value
+
+ This is a variation with a ``whitelist`` filter.
+
+ """
+ mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_MISSING_DOMAIN
+ assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ self.assertRaises(exception.ValidationError, rp.process, assertion)
+
+ def test_rule_engine_blacklist_direct_group_mapping_missing_domain(self):
+ """Test if the local rule is rejected upon missing domain value
+
+ This is a variation with a ``blacklist`` filter.
+
+ """
+ mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST_MISSING_DOMAIN
+ assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ self.assertRaises(exception.ValidationError, rp.process, assertion)
+
+ def test_rule_engine_no_groups_allowed(self):
+ """Should return user mapped to no groups.
+
+ The EMPLOYEE_ASSERTION should successfully have a match
+ in MAPPING_GROUPS_WHITELIST, but 'whitelist' should filter out
+ the group values from the assertion and thus map to no groups.
+
+ """
+ mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST
+ assertion = mapping_fixtures.EMPLOYEE_ASSERTION
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ mapped_properties = rp.process(assertion)
+ self.assertIsNotNone(mapped_properties)
+ self.assertListEqual(mapped_properties['group_names'], [])
+ self.assertListEqual(mapped_properties['group_ids'], [])
+ self.assertEqual('tbo', mapped_properties['user']['name'])
+
+ def test_mapping_federated_domain_specified(self):
+ """Test mapping engine when domain 'ephemeral' is explicitely set.
+
+ For that, we use mapping rule MAPPING_EPHEMERAL_USER and assertion
+ EMPLOYEE_ASSERTION
+
+ """
+ mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ assertion = mapping_fixtures.EMPLOYEE_ASSERTION
+ mapped_properties = rp.process(assertion)
+ self.assertIsNotNone(mapped_properties)
+ self.assertValidMappedUserObject(mapped_properties)
+
+ def test_create_user_object_with_bad_mapping(self):
+ """Test if user object is created even with bad mapping.
+
+ User objects will be created by mapping engine always as long as there
+ is corresponding local rule. This test shows, that even with assertion
+ where no group names nor ids are matched, but there is 'blind' rule for
+ mapping user, such object will be created.
+
+ In this test MAPPING_EPHEMERAL_USER expects UserName set to jsmith
+ whereas value from assertion is 'tbo'.
+
+ """
+ mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ assertion = mapping_fixtures.CONTRACTOR_ASSERTION
+ mapped_properties = rp.process(assertion)
+ self.assertIsNotNone(mapped_properties)
+ self.assertValidMappedUserObject(mapped_properties)
+
+ self.assertNotIn('id', mapped_properties['user'])
+ self.assertNotIn('name', mapped_properties['user'])
+
+ def test_set_ephemeral_domain_to_ephemeral_users(self):
+ """Test auto assigning service domain to ephemeral users.
+
+ Test that ephemeral users will always become members of federated
+ service domain. The check depends on ``type`` value which must be set
+ to ``ephemeral`` in case of ephemeral user.
+
+ """
+ mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER_LOCAL_DOMAIN
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ assertion = mapping_fixtures.CONTRACTOR_ASSERTION
+ mapped_properties = rp.process(assertion)
+ self.assertIsNotNone(mapped_properties)
+ self.assertValidMappedUserObject(mapped_properties)
+
+ def test_local_user_local_domain(self):
+ """Test that local users can have non-service domains assigned."""
+ mapping = mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ assertion = mapping_fixtures.CONTRACTOR_ASSERTION
+ mapped_properties = rp.process(assertion)
+ self.assertIsNotNone(mapped_properties)
+ self.assertValidMappedUserObject(
+ mapped_properties, user_type='local',
+ domain_id=mapping_fixtures.LOCAL_DOMAIN)
+
+ def test_user_identifications_name(self):
+ """Test varius mapping options and how users are identified.
+
+ This test calls mapped.setup_username() for propagating user object.
+
+ Test plan:
+ - Check if the user has proper domain ('federated') set
+ - Check if the user has proper type set ('ephemeral')
+ - Check if user's name is properly mapped from the assertion
+ - Check if user's id is properly set and equal to name, as it was not
+ explicitly specified in the mapping.
+
+ """
+ mapping = mapping_fixtures.MAPPING_USER_IDS
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ assertion = mapping_fixtures.CONTRACTOR_ASSERTION
+ mapped_properties = rp.process(assertion)
+ self.assertIsNotNone(mapped_properties)
+ self.assertValidMappedUserObject(mapped_properties)
+ mapped.setup_username({}, mapped_properties)
+ self.assertEqual('jsmith', mapped_properties['user']['id'])
+ self.assertEqual('jsmith', mapped_properties['user']['name'])
+
+ def test_user_identifications_name_and_federated_domain(self):
+ """Test varius mapping options and how users are identified.
+
+ This test calls mapped.setup_username() for propagating user object.
+
+ Test plan:
+ - Check if the user has proper domain ('federated') set
+ - Check if the user has proper type set ('ephemeral')
+ - Check if user's name is properly mapped from the assertion
+ - Check if user's id is properly set and equal to name, as it was not
+ explicitly specified in the mapping.
+
+ """
+ mapping = mapping_fixtures.MAPPING_USER_IDS
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ assertion = mapping_fixtures.EMPLOYEE_ASSERTION
+ mapped_properties = rp.process(assertion)
+ self.assertIsNotNone(mapped_properties)
+ self.assertValidMappedUserObject(mapped_properties)
+ mapped.setup_username({}, mapped_properties)
+ self.assertEqual('tbo', mapped_properties['user']['name'])
+ self.assertEqual('abc123%40example.com',
+ mapped_properties['user']['id'])
+
+ def test_user_identification_id(self):
+ """Test varius mapping options and how users are identified.
+
+ This test calls mapped.setup_username() for propagating user object.
+
+ Test plan:
+ - Check if the user has proper domain ('federated') set
+ - Check if the user has proper type set ('ephemeral')
+ - Check if user's id is properly mapped from the assertion
+ - Check if user's name is properly set and equal to id, as it was not
+ explicitly specified in the mapping.
+
+ """
+ mapping = mapping_fixtures.MAPPING_USER_IDS
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ assertion = mapping_fixtures.ADMIN_ASSERTION
+ mapped_properties = rp.process(assertion)
+ context = {'environment': {}}
+ self.assertIsNotNone(mapped_properties)
+ self.assertValidMappedUserObject(mapped_properties)
+ mapped.setup_username(context, mapped_properties)
+ self.assertEqual('bob', mapped_properties['user']['name'])
+ self.assertEqual('bob', mapped_properties['user']['id'])
+
+ def test_user_identification_id_and_name(self):
+ """Test varius mapping options and how users are identified.
+
+ This test calls mapped.setup_username() for propagating user object.
+
+ Test plan:
+ - Check if the user has proper domain ('federated') set
+ - Check if the user has proper type set ('ephemeral')
+ - Check if user's name is properly mapped from the assertion
+ - Check if user's id is properly set and equal to value hardcoded
+ in the mapping
+
+ This test does two iterations with different assertions used as input
+ for the Mapping Engine. Different assertions will be matched with
+ different rules in the ruleset, effectively issuing different user_id
+ (hardcoded values). In the first iteration, the hardcoded user_id is
+ not url-safe and we expect Keystone to make it url safe. In the latter
+ iteration, provided user_id is already url-safe and we expect server
+ not to change it.
+
+ """
+ testcases = [(mapping_fixtures.CUSTOMER_ASSERTION, 'bwilliams'),
+ (mapping_fixtures.EMPLOYEE_ASSERTION, 'tbo')]
+ for assertion, exp_user_name in testcases:
+ mapping = mapping_fixtures.MAPPING_USER_IDS
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ mapped_properties = rp.process(assertion)
+ context = {'environment': {}}
+ self.assertIsNotNone(mapped_properties)
+ self.assertValidMappedUserObject(mapped_properties)
+ mapped.setup_username(context, mapped_properties)
+ self.assertEqual(exp_user_name, mapped_properties['user']['name'])
+ self.assertEqual('abc123%40example.com',
+ mapped_properties['user']['id'])
diff --git a/keystone-moon/keystone/tests/unit/core.py b/keystone-moon/keystone/tests/unit/core.py
index caca7dbd..e999b641 100644
--- a/keystone-moon/keystone/tests/unit/core.py
+++ b/keystone-moon/keystone/tests/unit/core.py
@@ -45,6 +45,7 @@ from keystone.common import config as common_cfg
from keystone.common import dependency
from keystone.common import kvs
from keystone.common.kvs import core as kvs_core
+from keystone.common import sql
from keystone import config
from keystone import controllers
from keystone import exception
@@ -145,8 +146,9 @@ def remove_generated_paste_config(extension_name):
def skip_if_cache_disabled(*sections):
- """This decorator is used to skip a test if caching is disabled either
- globally or for the specific section.
+ """This decorator is used to skip a test if caching is disabled.
+
+ Caching can be disabled either globally or for a specific section.
In the code fragment::
@@ -163,6 +165,7 @@ def skip_if_cache_disabled(*sections):
If a specified configuration section does not define the `caching` option,
this decorator makes the same assumption as the `should_cache_fn` in
keystone.common.cache that caching should be enabled.
+
"""
def wrapper(f):
@functools.wraps(f)
@@ -180,9 +183,7 @@ def skip_if_cache_disabled(*sections):
def skip_if_no_multiple_domains_support(f):
- """This decorator is used to skip a test if an identity driver
- does not support multiple domains.
- """
+ """Decorator to skip tests for identity drivers limited to one domain."""
@functools.wraps(f)
def wrapper(*args, **kwargs):
test_obj = args[0]
@@ -215,7 +216,7 @@ class TestClient(object):
req = webob.Request.blank(path)
req.method = method
- for k, v in six.iteritems(headers):
+ for k, v in headers.items():
req.headers[k] = v
if body:
req.body = body
@@ -244,6 +245,13 @@ class BaseTestCase(oslotest.BaseTestCase):
super(BaseTestCase, self).setUp()
self.useFixture(mockpatch.PatchObject(sys, 'exit',
side_effect=UnexpectedExit))
+ self.useFixture(mockpatch.PatchObject(logging.Handler, 'handleError',
+ side_effect=BadLog))
+
+ warnings.filterwarnings('error', category=DeprecationWarning,
+ module='^keystone\\.')
+ warnings.simplefilter('error', exc.SAWarning)
+ self.addCleanup(warnings.resetwarnings)
def cleanup_instance(self, *names):
"""Create a function suitable for use with self.addCleanup.
@@ -261,13 +269,17 @@ class BaseTestCase(oslotest.BaseTestCase):
return cleanup
-@dependency.requires('revoke_api')
class TestCase(BaseTestCase):
def config_files(self):
return []
def config_overrides(self):
+ # NOTE(morganfainberg): enforce config_overrides can only ever be
+ # called a single time.
+ assert self.__config_overrides_called is False
+ self.__config_overrides_called = True
+
signing_certfile = 'examples/pki/certs/signing_cert.pem'
signing_keyfile = 'examples/pki/private/signing_key.pem'
self.config_fixture.config(group='oslo_policy',
@@ -281,30 +293,20 @@ class TestCase(BaseTestCase):
proxies=['keystone.tests.unit.test_cache.CacheIsolatingProxy'])
self.config_fixture.config(
group='catalog',
- driver='keystone.catalog.backends.templated.Catalog',
+ driver='templated',
template_file=dirs.tests('default_catalog.templates'))
self.config_fixture.config(
- group='identity',
- driver='keystone.identity.backends.sql.Identity')
- self.config_fixture.config(
group='kvs',
backends=[
('keystone.tests.unit.test_kvs.'
'KVSBackendForcedKeyMangleFixture'),
'keystone.tests.unit.test_kvs.KVSBackendFixture'])
- self.config_fixture.config(
- group='revoke',
- driver='keystone.contrib.revoke.backends.kvs.Revoke')
+ self.config_fixture.config(group='revoke', driver='kvs')
self.config_fixture.config(
group='signing', certfile=signing_certfile,
keyfile=signing_keyfile,
ca_certs='examples/pki/certs/cacert.pem')
- self.config_fixture.config(
- group='token',
- driver='keystone.token.persistence.backends.kvs.Token')
- self.config_fixture.config(
- group='trust',
- driver='keystone.trust.backends.sql.Trust')
+ self.config_fixture.config(group='token', driver='kvs')
self.config_fixture.config(
group='saml', certfile=signing_certfile, keyfile=signing_keyfile)
self.config_fixture.config(
@@ -327,28 +329,21 @@ class TestCase(BaseTestCase):
self.auth_plugin_config_override()
def auth_plugin_config_override(self, methods=None, **method_classes):
- if methods is None:
- methods = ['external', 'password', 'token', ]
- if not method_classes:
- method_classes = dict(
- external='keystone.auth.plugins.external.DefaultDomain',
- password='keystone.auth.plugins.password.Password',
- token='keystone.auth.plugins.token.Token',
- )
- self.config_fixture.config(group='auth', methods=methods)
- common_cfg.setup_authentication()
+ if methods is not None:
+ self.config_fixture.config(group='auth', methods=methods)
+ common_cfg.setup_authentication()
if method_classes:
self.config_fixture.config(group='auth', **method_classes)
+ def _assert_config_overrides_called(self):
+ assert self.__config_overrides_called is True
+
def setUp(self):
super(TestCase, self).setUp()
- self.addCleanup(self.cleanup_instance('config_fixture', 'logger'))
-
+ self.__config_overrides_called = False
self.addCleanup(CONF.reset)
-
- self.useFixture(mockpatch.PatchObject(logging.Handler, 'handleError',
- side_effect=BadLog))
self.config_fixture = self.useFixture(config_fixture.Config(CONF))
+ self.addCleanup(delattr, self, 'config_fixture')
self.config(self.config_files())
# NOTE(morganfainberg): mock the auth plugin setup to use the config
@@ -356,13 +351,15 @@ class TestCase(BaseTestCase):
# cleanup.
def mocked_register_auth_plugin_opt(conf, opt):
self.config_fixture.register_opt(opt, group='auth')
- self.register_auth_plugin_opt_patch = self.useFixture(
- mockpatch.PatchObject(common_cfg, '_register_auth_plugin_opt',
- new=mocked_register_auth_plugin_opt))
+ self.useFixture(mockpatch.PatchObject(
+ common_cfg, '_register_auth_plugin_opt',
+ new=mocked_register_auth_plugin_opt))
self.config_overrides()
+ # NOTE(morganfainberg): ensure config_overrides has been called.
+ self.addCleanup(self._assert_config_overrides_called)
- self.logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
+ self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
# NOTE(morganfainberg): This code is a copy from the oslo-incubator
# log module. This is not in a function or otherwise available to use
@@ -374,11 +371,6 @@ class TestCase(BaseTestCase):
logger = logging.getLogger(mod)
logger.setLevel(level_name)
- warnings.filterwarnings('error', category=DeprecationWarning,
- module='^keystone\\.')
- warnings.simplefilter('error', exc.SAWarning)
- self.addCleanup(warnings.resetwarnings)
-
self.useFixture(ksfixtures.Cache())
# Clear the registry of providers so that providers from previous
@@ -397,6 +389,7 @@ class TestCase(BaseTestCase):
self.addCleanup(setattr, controllers, '_VERSIONS', [])
def config(self, config_files):
+ sql.initialize()
CONF(args=[], project='keystone', default_config_files=config_files)
def load_backends(self):
@@ -417,9 +410,9 @@ class TestCase(BaseTestCase):
drivers, _unused = common.setup_backends(
load_extra_backends_fn=self.load_extra_backends)
- for manager_name, manager in six.iteritems(drivers):
+ for manager_name, manager in drivers.items():
setattr(self, manager_name, manager)
- self.addCleanup(self.cleanup_instance(*drivers.keys()))
+ self.addCleanup(self.cleanup_instance(*list(drivers.keys())))
def load_extra_backends(self):
"""Override to load managers that aren't loaded by default.
@@ -541,15 +534,9 @@ class TestCase(BaseTestCase):
def assertNotEmpty(self, l):
self.assertTrue(len(l))
- def assertDictEqual(self, d1, d2, msg=None):
- self.assertIsInstance(d1, dict)
- self.assertIsInstance(d2, dict)
- self.assertEqual(d1, d2, msg)
-
def assertRaisesRegexp(self, expected_exception, expected_regexp,
callable_obj, *args, **kwargs):
- """Asserts that the message in a raised exception matches a regexp.
- """
+ """Asserts that the message in a raised exception matches a regexp."""
try:
callable_obj(*args, **kwargs)
except expected_exception as exc_value:
@@ -573,43 +560,6 @@ class TestCase(BaseTestCase):
excName = str(expected_exception)
raise self.failureException("%s not raised" % excName)
- def assertDictContainsSubset(self, expected, actual, msg=None):
- """Checks whether actual is a superset of expected."""
-
- def safe_repr(obj, short=False):
- _MAX_LENGTH = 80
- try:
- result = repr(obj)
- except Exception:
- result = object.__repr__(obj)
- if not short or len(result) < _MAX_LENGTH:
- return result
- return result[:_MAX_LENGTH] + ' [truncated]...'
-
- missing = []
- mismatched = []
- for key, value in six.iteritems(expected):
- if key not in actual:
- missing.append(key)
- elif value != actual[key]:
- mismatched.append('%s, expected: %s, actual: %s' %
- (safe_repr(key), safe_repr(value),
- safe_repr(actual[key])))
-
- if not (missing or mismatched):
- return
-
- standardMsg = ''
- if missing:
- standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
- missing)
- if mismatched:
- if standardMsg:
- standardMsg += '; '
- standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
-
- self.fail(self._formatMessage(msg, standardMsg))
-
@property
def ipv6_enabled(self):
if socket.has_ipv6:
@@ -640,21 +590,9 @@ class SQLDriverOverrides(object):
def config_overrides(self):
super(SQLDriverOverrides, self).config_overrides()
# SQL specific driver overrides
- self.config_fixture.config(
- group='catalog',
- driver='keystone.catalog.backends.sql.Catalog')
- self.config_fixture.config(
- group='identity',
- driver='keystone.identity.backends.sql.Identity')
- self.config_fixture.config(
- group='policy',
- driver='keystone.policy.backends.sql.Policy')
- self.config_fixture.config(
- group='revoke',
- driver='keystone.contrib.revoke.backends.sql.Revoke')
- self.config_fixture.config(
- group='token',
- driver='keystone.token.persistence.backends.sql.Token')
- self.config_fixture.config(
- group='trust',
- driver='keystone.trust.backends.sql.Trust')
+ self.config_fixture.config(group='catalog', driver='sql')
+ self.config_fixture.config(group='identity', driver='sql')
+ self.config_fixture.config(group='policy', driver='sql')
+ self.config_fixture.config(group='revoke', driver='sql')
+ self.config_fixture.config(group='token', driver='sql')
+ self.config_fixture.config(group='trust', driver='sql')
diff --git a/keystone-moon/keystone/tests/unit/default_fixtures.py b/keystone-moon/keystone/tests/unit/default_fixtures.py
index f7e2064f..80b0665f 100644
--- a/keystone-moon/keystone/tests/unit/default_fixtures.py
+++ b/keystone-moon/keystone/tests/unit/default_fixtures.py
@@ -25,6 +25,7 @@ TENANTS = [
'description': 'description',
'enabled': True,
'parent_id': None,
+ 'is_domain': False,
}, {
'id': 'baz',
'name': 'BAZ',
@@ -32,6 +33,7 @@ TENANTS = [
'description': 'description',
'enabled': True,
'parent_id': None,
+ 'is_domain': False,
}, {
'id': 'mtu',
'name': 'MTU',
@@ -39,6 +41,7 @@ TENANTS = [
'enabled': True,
'domain_id': DEFAULT_DOMAIN_ID,
'parent_id': None,
+ 'is_domain': False,
}, {
'id': 'service',
'name': 'service',
@@ -46,6 +49,7 @@ TENANTS = [
'enabled': True,
'domain_id': DEFAULT_DOMAIN_ID,
'parent_id': None,
+ 'is_domain': False,
}
]
diff --git a/keystone-moon/keystone/tests/unit/fakeldap.py b/keystone-moon/keystone/tests/unit/fakeldap.py
index 85aaadfe..2f1ebe57 100644
--- a/keystone-moon/keystone/tests/unit/fakeldap.py
+++ b/keystone-moon/keystone/tests/unit/fakeldap.py
@@ -87,7 +87,7 @@ def _internal_attr(attr_name, value_or_values):
return [attr_fn(value_or_values)]
-def _match_query(query, attrs):
+def _match_query(query, attrs, attrs_checked):
"""Match an ldap query to an attribute dictionary.
The characters &, |, and ! are supported in the query. No syntax checking
@@ -102,12 +102,14 @@ def _match_query(query, attrs):
matchfn = any
# cut off the & or |
groups = _paren_groups(inner[1:])
- return matchfn(_match_query(group, attrs) for group in groups)
+ return matchfn(_match_query(group, attrs, attrs_checked)
+ for group in groups)
if inner.startswith('!'):
# cut off the ! and the nested parentheses
- return not _match_query(query[2:-1], attrs)
+ return not _match_query(query[2:-1], attrs, attrs_checked)
(k, _sep, v) = inner.partition('=')
+ attrs_checked.add(k.lower())
return _match(k, v, attrs)
@@ -210,7 +212,7 @@ FakeShelves = {}
class FakeLdap(core.LDAPHandler):
- '''Emulate the python-ldap API.
+ """Emulate the python-ldap API.
The python-ldap API requires all strings to be UTF-8 encoded. This
is assured by the caller of this interface
@@ -223,7 +225,8 @@ class FakeLdap(core.LDAPHandler):
strings, decodes them to unicode for operations internal to this
emulation, and encodes them back to UTF-8 when returning values
from the emulation.
- '''
+
+ """
__prefix = 'ldap:'
@@ -254,7 +257,7 @@ class FakeLdap(core.LDAPHandler):
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile)
elif tls_cacertdir:
ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
- if tls_req_cert in core.LDAP_TLS_CERTS.values():
+ if tls_req_cert in list(core.LDAP_TLS_CERTS.values()):
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
else:
raise ValueError("invalid TLS_REQUIRE_CERT tls_req_cert=%s",
@@ -356,7 +359,7 @@ class FakeLdap(core.LDAPHandler):
return self.delete_ext_s(dn, serverctrls=[])
def _getChildren(self, dn):
- return [k for k, v in six.iteritems(self.db)
+ return [k for k, v in self.db.items()
if re.match('%s.*,%s' % (
re.escape(self.__prefix),
re.escape(self.dn(dn))), k)]
@@ -451,6 +454,10 @@ class FakeLdap(core.LDAPHandler):
if server_fail:
raise ldap.SERVER_DOWN
+ if (not filterstr) and (scope != ldap.SCOPE_BASE):
+ raise AssertionError('Search without filter on onelevel or '
+ 'subtree scope')
+
if scope == ldap.SCOPE_BASE:
try:
item_dict = self.db[self.key(base)]
@@ -473,7 +480,7 @@ class FakeLdap(core.LDAPHandler):
raise ldap.NO_SUCH_OBJECT
results = [(base, item_dict)]
extraresults = [(k[len(self.__prefix):], v)
- for k, v in six.iteritems(self.db)
+ for k, v in self.db.items()
if re.match('%s.*,%s' %
(re.escape(self.__prefix),
re.escape(self.dn(base))), k)]
@@ -484,7 +491,7 @@ class FakeLdap(core.LDAPHandler):
base_dn = ldap.dn.str2dn(core.utf8_encode(base))
base_len = len(base_dn)
- for k, v in six.iteritems(self.db):
+ for k, v in self.db.items():
if not k.startswith(self.__prefix):
continue
k_dn_str = k[len(self.__prefix):]
@@ -509,9 +516,15 @@ class FakeLdap(core.LDAPHandler):
id_val = core.utf8_decode(id_val)
match_attrs = attrs.copy()
match_attrs[id_attr] = [id_val]
- if not filterstr or _match_query(filterstr, match_attrs):
+ attrs_checked = set()
+ if not filterstr or _match_query(filterstr, match_attrs,
+ attrs_checked):
+ if (filterstr and
+ (scope != ldap.SCOPE_BASE) and
+ ('objectclass' not in attrs_checked)):
+ raise AssertionError('No objectClass in search filter')
# filter the attributes by attrlist
- attrs = {k: v for k, v in six.iteritems(attrs)
+ attrs = {k: v for k, v in attrs.items()
if not attrlist or k in attrlist}
objects.append((dn, attrs))
@@ -536,11 +549,11 @@ class FakeLdap(core.LDAPHandler):
class FakeLdapPool(FakeLdap):
- '''Emulate the python-ldap API with pooled connections using existing
- FakeLdap logic.
+ """Emulate the python-ldap API with pooled connections.
This class is used as connector class in PooledLDAPHandler.
- '''
+
+ """
def __init__(self, uri, retry_max=None, retry_delay=None, conn=None):
super(FakeLdapPool, self).__init__(conn=conn)
@@ -571,7 +584,7 @@ class FakeLdapPool(FakeLdap):
clientctrls=clientctrls)
def unbind_ext_s(self):
- '''Added to extend FakeLdap as connector class.'''
+ """Added to extend FakeLdap as connector class."""
pass
diff --git a/keystone-moon/keystone/tests/unit/filtering.py b/keystone-moon/keystone/tests/unit/filtering.py
index 1a31a23f..93e0bc28 100644
--- a/keystone-moon/keystone/tests/unit/filtering.py
+++ b/keystone-moon/keystone/tests/unit/filtering.py
@@ -15,6 +15,7 @@
import uuid
from oslo_config import cfg
+from six.moves import range
CONF = cfg.CONF
@@ -41,20 +42,50 @@ class FilterTests(object):
self.assertTrue(found)
def _create_entity(self, entity_type):
+ """Find the create_<entity_type> method.
+
+ Searches through the [identity_api, resource_api, assignment_api]
+ managers for a method called create_<entity_type> and returns the first
+ one.
+
+ """
+
f = getattr(self.identity_api, 'create_%s' % entity_type, None)
if f is None:
+ f = getattr(self.resource_api, 'create_%s' % entity_type, None)
+ if f is None:
f = getattr(self.assignment_api, 'create_%s' % entity_type)
return f
def _delete_entity(self, entity_type):
+ """Find the delete_<entity_type> method.
+
+ Searches through the [identity_api, resource_api, assignment_api]
+ managers for a method called delete_<entity_type> and returns the first
+ one.
+
+ """
+
f = getattr(self.identity_api, 'delete_%s' % entity_type, None)
if f is None:
+ f = getattr(self.resource_api, 'delete_%s' % entity_type, None)
+ if f is None:
f = getattr(self.assignment_api, 'delete_%s' % entity_type)
return f
def _list_entities(self, entity_type):
+ """Find the list_<entity_type> method.
+
+ Searches through the [identity_api, resource_api, assignment_api]
+ managers for a method called list_<entity_type> and returns the first
+ one.
+
+ """
+
f = getattr(self.identity_api, 'list_%ss' % entity_type, None)
if f is None:
+ f = getattr(self.resource_api, 'list_%ss' % entity_type, None)
+ if f is None:
f = getattr(self.assignment_api, 'list_%ss' % entity_type)
return f
diff --git a/keystone-moon/keystone/tests/unit/identity/test_core.py b/keystone-moon/keystone/tests/unit/identity/test_core.py
index 6c8faebb..fa95ec50 100644
--- a/keystone-moon/keystone/tests/unit/identity/test_core.py
+++ b/keystone-moon/keystone/tests/unit/identity/test_core.py
@@ -12,11 +12,13 @@
"""Unit tests for core identity behavior."""
+import itertools
import os
import uuid
import mock
from oslo_config import cfg
+from oslo_config import fixture as config_fixture
from keystone import exception
from keystone import identity
@@ -34,7 +36,10 @@ class TestDomainConfigs(tests.BaseTestCase):
self.addCleanup(CONF.reset)
self.tmp_dir = tests.dirs.tmp()
- CONF.set_override('domain_config_dir', self.tmp_dir, 'identity')
+
+ self.config_fixture = self.useFixture(config_fixture.Config(CONF))
+ self.config_fixture.config(domain_config_dir=self.tmp_dir,
+ group='identity')
def test_config_for_nonexistent_domain(self):
"""Having a config for a non-existent domain will be ignored.
@@ -80,6 +85,45 @@ class TestDomainConfigs(tests.BaseTestCase):
[domain_config_filename],
'abc.def.com')
+ def test_config_for_multiple_sql_backend(self):
+ domains_config = identity.DomainConfigs()
+
+ # Create the right sequence of is_sql in the drivers being
+ # requested to expose the bug, which is that a False setting
+ # means it forgets previous True settings.
+ drivers = []
+ files = []
+ for idx, is_sql in enumerate((True, False, True)):
+ drv = mock.Mock(is_sql=is_sql)
+ drivers.append(drv)
+ name = 'dummy.{0}'.format(idx)
+ files.append(''.join((
+ identity.DOMAIN_CONF_FHEAD,
+ name,
+ identity.DOMAIN_CONF_FTAIL)))
+
+ walk_fake = lambda *a, **kwa: (
+ ('/fake/keystone/domains/config', [], files), )
+
+ generic_driver = mock.Mock(is_sql=False)
+
+ assignment_api = mock.Mock()
+ id_factory = itertools.count()
+ assignment_api.get_domain_by_name.side_effect = (
+ lambda name: {'id': next(id_factory), '_': 'fake_domain'})
+ load_driver_mock = mock.Mock(side_effect=drivers)
+
+ with mock.patch.object(os, 'walk', walk_fake):
+ with mock.patch.object(identity.cfg, 'ConfigOpts'):
+ with mock.patch.object(domains_config, '_load_driver',
+ load_driver_mock):
+ self.assertRaises(
+ exception.MultipleSQLDriversInConfig,
+ domains_config.setup_domain_drivers,
+ generic_driver, assignment_api)
+
+ self.assertEqual(3, load_driver_mock.call_count)
+
class TestDatabaseDomainConfigs(tests.TestCase):
@@ -92,15 +136,16 @@ class TestDatabaseDomainConfigs(tests.TestCase):
self.assertFalse(CONF.identity.domain_configurations_from_database)
def test_loading_config_from_database(self):
- CONF.set_override('domain_configurations_from_database', True,
- 'identity')
+ self.config_fixture.config(domain_configurations_from_database=True,
+ group='identity')
domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.resource_api.create_domain(domain['id'], domain)
# Override two config options for our domain
conf = {'ldap': {'url': uuid.uuid4().hex,
- 'suffix': uuid.uuid4().hex},
+ 'suffix': uuid.uuid4().hex,
+ 'use_tls': 'True'},
'identity': {
- 'driver': 'keystone.identity.backends.ldap.Identity'}}
+ 'driver': 'ldap'}}
self.domain_config_api.create_config(domain['id'], conf)
fake_standard_driver = None
domain_config = identity.DomainConfigs()
@@ -112,6 +157,11 @@ class TestDatabaseDomainConfigs(tests.TestCase):
self.assertEqual(conf['ldap']['suffix'], res.ldap.suffix)
self.assertEqual(CONF.ldap.query_scope, res.ldap.query_scope)
+ # Make sure the override is not changing the type of the config value
+ use_tls_type = type(CONF.ldap.use_tls)
+ self.assertEqual(use_tls_type(conf['ldap']['use_tls']),
+ res.ldap.use_tls)
+
# Now turn off using database domain configuration and check that the
# default config file values are now seen instead of the overrides.
CONF.set_override('domain_configurations_from_database', False,
@@ -122,4 +172,5 @@ class TestDatabaseDomainConfigs(tests.TestCase):
res = domain_config.get_domain_conf(domain['id'])
self.assertEqual(CONF.ldap.url, res.ldap.url)
self.assertEqual(CONF.ldap.suffix, res.ldap.suffix)
+ self.assertEqual(CONF.ldap.use_tls, res.ldap.use_tls)
self.assertEqual(CONF.ldap.query_scope, res.ldap.query_scope)
diff --git a/keystone-moon/keystone/tests/unit/ksfixtures/database.py b/keystone-moon/keystone/tests/unit/ksfixtures/database.py
index 15597539..0012df74 100644
--- a/keystone-moon/keystone/tests/unit/ksfixtures/database.py
+++ b/keystone-moon/keystone/tests/unit/ksfixtures/database.py
@@ -13,15 +13,12 @@
import functools
import os
-import shutil
import fixtures
from oslo_config import cfg
from oslo_db import options as db_options
-from oslo_db.sqlalchemy import migration
from keystone.common import sql
-from keystone.common.sql import migration_helpers
from keystone.tests import unit as tests
@@ -42,23 +39,6 @@ def run_once(f):
return wrapper
-def _setup_database(extensions=None):
- if CONF.database.connection != tests.IN_MEM_DB_CONN_STRING:
- db = tests.dirs.tmp('test.db')
- pristine = tests.dirs.tmp('test.db.pristine')
-
- if os.path.exists(db):
- os.unlink(db)
- if not os.path.exists(pristine):
- migration.db_sync(sql.get_engine(),
- migration_helpers.find_migrate_repo())
- for extension in (extensions or []):
- migration_helpers.sync_database_to_version(extension=extension)
- shutil.copyfile(db, pristine)
- else:
- shutil.copyfile(pristine, db)
-
-
# NOTE(I159): Every execution all the options will be cleared. The method must
# be called at the every fixture initialization.
def initialize_sql_session():
@@ -108,17 +88,18 @@ class Database(fixtures.Fixture):
"""
- def __init__(self, extensions=None):
+ def __init__(self):
super(Database, self).__init__()
- self._extensions = extensions
initialize_sql_session()
_load_sqlalchemy_models()
def setUp(self):
super(Database, self).setUp()
- _setup_database(extensions=self._extensions)
self.engine = sql.get_engine()
- sql.ModelBase.metadata.create_all(bind=self.engine)
self.addCleanup(sql.cleanup)
+ sql.ModelBase.metadata.create_all(bind=self.engine)
self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
+
+ def recreate(self):
+ sql.ModelBase.metadata.create_all(bind=self.engine)
diff --git a/keystone-moon/keystone/tests/unit/ksfixtures/hacking.py b/keystone-moon/keystone/tests/unit/ksfixtures/hacking.py
index 47ef6b4b..918087ad 100644
--- a/keystone-moon/keystone/tests/unit/ksfixtures/hacking.py
+++ b/keystone-moon/keystone/tests/unit/ksfixtures/hacking.py
@@ -118,8 +118,8 @@ class HackingCode(fixtures.Fixture):
import logging as stlib_logging
from keystone.i18n import _
from keystone.i18n import _ as oslo_i18n
- from keystone.openstack.common import log
- from keystone.openstack.common import log as oslo_logging
+ from oslo_log import log
+ from oslo_log import log as oslo_logging
# stdlib logging
L0 = logging.getLogger()
@@ -138,7 +138,7 @@ class HackingCode(fixtures.Fixture):
)
# oslo logging and specifying a logger
- L2 = log.getLogger(__name__)
+ L2 = logging.getLogger(__name__)
L2.debug(oslo_i18n('text'))
# oslo logging w/ alias
@@ -179,84 +179,6 @@ class HackingCode(fixtures.Fixture):
]
}
- oslo_namespace_imports = {
- 'code': """
- import oslo.utils
- import oslo_utils
- import oslo.utils.encodeutils
- import oslo_utils.encodeutils
- from oslo import utils
- from oslo.utils import encodeutils
- from oslo_utils import encodeutils
-
- import oslo.serialization
- import oslo_serialization
- import oslo.serialization.jsonutils
- import oslo_serialization.jsonutils
- from oslo import serialization
- from oslo.serialization import jsonutils
- from oslo_serialization import jsonutils
-
- import oslo.messaging
- import oslo_messaging
- import oslo.messaging.conffixture
- import oslo_messaging.conffixture
- from oslo import messaging
- from oslo.messaging import conffixture
- from oslo_messaging import conffixture
-
- import oslo.db
- import oslo_db
- import oslo.db.api
- import oslo_db.api
- from oslo import db
- from oslo.db import api
- from oslo_db import api
-
- import oslo.config
- import oslo_config
- import oslo.config.cfg
- import oslo_config.cfg
- from oslo import config
- from oslo.config import cfg
- from oslo_config import cfg
-
- import oslo.i18n
- import oslo_i18n
- import oslo.i18n.log
- import oslo_i18n.log
- from oslo import i18n
- from oslo.i18n import log
- from oslo_i18n import log
- """,
- 'expected_errors': [
- (1, 0, 'K333'),
- (3, 0, 'K333'),
- (5, 0, 'K333'),
- (6, 0, 'K333'),
- (9, 0, 'K333'),
- (11, 0, 'K333'),
- (13, 0, 'K333'),
- (14, 0, 'K333'),
- (17, 0, 'K333'),
- (19, 0, 'K333'),
- (21, 0, 'K333'),
- (22, 0, 'K333'),
- (25, 0, 'K333'),
- (27, 0, 'K333'),
- (29, 0, 'K333'),
- (30, 0, 'K333'),
- (33, 0, 'K333'),
- (35, 0, 'K333'),
- (37, 0, 'K333'),
- (38, 0, 'K333'),
- (41, 0, 'K333'),
- (43, 0, 'K333'),
- (45, 0, 'K333'),
- (46, 0, 'K333'),
- ],
- }
-
dict_constructor = {
'code': """
lower_res = {k.lower(): v for k, v in six.iteritems(res[1])}
@@ -285,8 +207,8 @@ class HackingLogging(fixtures.Fixture):
from keystone.i18n import _LE as error_hint
from keystone.i18n import _LI
from keystone.i18n import _LW
- from keystone.openstack.common import log
- from keystone.openstack.common import log as oslo_logging
+ from oslo_log import log
+ from oslo_log import log as oslo_logging
"""
examples = [
diff --git a/keystone-moon/keystone/tests/unit/ksfixtures/key_repository.py b/keystone-moon/keystone/tests/unit/ksfixtures/key_repository.py
index d1ac2ab4..7784bddc 100644
--- a/keystone-moon/keystone/tests/unit/ksfixtures/key_repository.py
+++ b/keystone-moon/keystone/tests/unit/ksfixtures/key_repository.py
@@ -10,9 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import shutil
-import tempfile
-
import fixtures
from keystone.token.providers.fernet import utils
@@ -25,8 +22,7 @@ class KeyRepository(fixtures.Fixture):
def setUp(self):
super(KeyRepository, self).setUp()
- directory = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, directory)
+ directory = self.useFixture(fixtures.TempDir()).path
self.config_fixture.config(group='fernet_tokens',
key_repository=directory)
diff --git a/keystone-moon/keystone/tests/unit/ksfixtures/ldapdb.py b/keystone-moon/keystone/tests/unit/ksfixtures/ldapdb.py
new file mode 100644
index 00000000..b2cbe067
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/ksfixtures/ldapdb.py
@@ -0,0 +1,36 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fixtures
+
+from keystone.common import ldap as common_ldap
+from keystone.common.ldap import core as common_ldap_core
+from keystone.tests.unit import fakeldap
+
+
+class LDAPDatabase(fixtures.Fixture):
+ """A fixture for setting up and tearing down an LDAP database.
+ """
+
+ def setUp(self):
+ super(LDAPDatabase, self).setUp()
+ self.clear()
+ common_ldap_core._HANDLERS.clear()
+ common_ldap.register_handler('fake://', fakeldap.FakeLdap)
+ # TODO(dstanek): switch the flow here
+ self.addCleanup(self.clear)
+ self.addCleanup(common_ldap_core._HANDLERS.clear)
+
+ def clear(self):
+ for shelf in fakeldap.FakeShelves:
+ fakeldap.FakeShelves[shelf].clear()
diff --git a/keystone-moon/keystone/tests/unit/mapping_fixtures.py b/keystone-moon/keystone/tests/unit/mapping_fixtures.py
index 0892ada5..f86d9245 100644
--- a/keystone-moon/keystone/tests/unit/mapping_fixtures.py
+++ b/keystone-moon/keystone/tests/unit/mapping_fixtures.py
@@ -12,6 +12,9 @@
"""Fixtures for Federation Mapping."""
+from six.moves import range, zip
+
+
EMPLOYEE_GROUP_ID = "0cd5e9"
CONTRACTOR_GROUP_ID = "85a868"
TESTER_GROUP_ID = "123"
@@ -786,6 +789,7 @@ MAPPING_USER_IDS = {
{
"user": {
"name": "{0}",
+ "id": "abc123@example.com",
"domain": {
"id": "federated"
}
@@ -828,7 +832,7 @@ MAPPING_USER_IDS = {
"local": [
{
"user": {
- "id": "abc123",
+ "id": "abc123@example.com",
"name": "{0}",
"domain": {
"id": "federated"
@@ -963,6 +967,7 @@ TESTER_ASSERTION = {
}
ANOTHER_TESTER_ASSERTION = {
+ 'Email': 'testacct@example.com',
'UserName': 'IamTester'
}
@@ -989,8 +994,8 @@ MALFORMED_TESTER_ASSERTION = {
'LastName': 'Account',
'orgPersonType': 'Tester',
'object': object(),
- 'dictionary': dict(zip('teststring', xrange(10))),
- 'tuple': tuple(xrange(5))
+ 'dictionary': dict(zip('teststring', range(10))),
+ 'tuple': tuple(range(5))
}
DEVELOPER_ASSERTION = {
diff --git a/keystone-moon/keystone/tests/unit/rest.py b/keystone-moon/keystone/tests/unit/rest.py
index 16513024..bfa52354 100644
--- a/keystone-moon/keystone/tests/unit/rest.py
+++ b/keystone-moon/keystone/tests/unit/rest.py
@@ -13,7 +13,6 @@
# under the License.
from oslo_serialization import jsonutils
-import six
import webtest
from keystone.auth import controllers as auth_controllers
@@ -61,7 +60,7 @@ class RestfulTestCase(tests.TestCase):
# Will need to reset the plug-ins
self.addCleanup(setattr, auth_controllers, 'AUTH_METHODS', {})
- self.useFixture(database.Database(extensions=self.get_extensions()))
+ self.useFixture(database.Database())
self.load_backends()
self.load_fixtures(default_fixtures)
@@ -75,7 +74,7 @@ class RestfulTestCase(tests.TestCase):
def request(self, app, path, body=None, headers=None, token=None,
expected_status=None, **kwargs):
if headers:
- headers = {str(k): str(v) for k, v in six.iteritems(headers)}
+ headers = {str(k): str(v) for k, v in headers.items()}
else:
headers = {}
@@ -119,7 +118,7 @@ class RestfulTestCase(tests.TestCase):
self.assertEqual(
response.status_code,
expected_status,
- 'Status code %s is not %s, as expected)\n\n%s' %
+ 'Status code %s is not %s, as expected\n\n%s' %
(response.status_code, expected_status, response.body))
def assertValidResponseHeaders(self, response):
diff --git a/keystone-moon/keystone/tests/unit/saml2/signed_saml2_assertion.xml b/keystone-moon/keystone/tests/unit/saml2/signed_saml2_assertion.xml
index 410f9388..414ff9cf 100644
--- a/keystone-moon/keystone/tests/unit/saml2/signed_saml2_assertion.xml
+++ b/keystone-moon/keystone/tests/unit/saml2/signed_saml2_assertion.xml
@@ -49,15 +49,21 @@ UHeBXxQq/GmfBv3l+V5ObQ+EHKnyDodLHCk=</ns1:X509Certificate>
</ns0:AuthnContext>
</ns0:AuthnStatement>
<ns0:AttributeStatement>
- <ns0:Attribute FriendlyName="keystone_user" Name="user" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
+ <ns0:Attribute Name="openstack_user" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
<ns0:AttributeValue xsi:type="xs:string">test_user</ns0:AttributeValue>
</ns0:Attribute>
- <ns0:Attribute FriendlyName="keystone_roles" Name="roles" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
+ <ns0:Attribute Name="openstack_user_domain" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
+ <ns0:AttributeValue xsi:type="xs:string">user_domain</ns0:AttributeValue>
+ </ns0:Attribute>
+ <ns0:Attribute Name="openstack_roles" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
<ns0:AttributeValue xsi:type="xs:string">admin</ns0:AttributeValue>
<ns0:AttributeValue xsi:type="xs:string">member</ns0:AttributeValue>
</ns0:Attribute>
- <ns0:Attribute FriendlyName="keystone_project" Name="project" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
+ <ns0:Attribute Name="openstack_project" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
<ns0:AttributeValue xsi:type="xs:string">development</ns0:AttributeValue>
</ns0:Attribute>
+ <ns0:Attribute Name="openstack_project_domain" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
+ <ns0:AttributeValue xsi:type="xs:string">project_domain</ns0:AttributeValue>
+ </ns0:Attribute>
</ns0:AttributeStatement>
</ns0:Assertion>
diff --git a/keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py b/keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py
index e0159b76..9cde704e 100644
--- a/keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py
+++ b/keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py
@@ -17,8 +17,6 @@ import uuid
from testtools import matchers
-# NOTE(morganfainberg): import endpoint filter to populate the SQL model
-from keystone.contrib import endpoint_filter # noqa
from keystone.tests.unit import test_v3
@@ -30,9 +28,7 @@ class TestExtensionCase(test_v3.RestfulTestCase):
def config_overrides(self):
super(TestExtensionCase, self).config_overrides()
self.config_fixture.config(
- group='catalog',
- driver='keystone.contrib.endpoint_filter.backends.catalog_sql.'
- 'EndpointFilterCatalog')
+ group='catalog', driver='endpoint_filter.sql')
def setUp(self):
super(TestExtensionCase, self).setUp()
@@ -52,7 +48,6 @@ class EndpointFilterCRUDTestCase(TestExtensionCase):
"""
self.put(self.default_request_url,
- body='',
expected_status=204)
def test_create_endpoint_project_association_with_invalid_project(self):
@@ -65,7 +60,6 @@ class EndpointFilterCRUDTestCase(TestExtensionCase):
'/endpoints/%(endpoint_id)s' % {
'project_id': uuid.uuid4().hex,
'endpoint_id': self.endpoint_id},
- body='',
expected_status=404)
def test_create_endpoint_project_association_with_invalid_endpoint(self):
@@ -78,7 +72,6 @@ class EndpointFilterCRUDTestCase(TestExtensionCase):
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': uuid.uuid4().hex},
- body='',
expected_status=404)
def test_create_endpoint_project_association_with_unexpected_body(self):
@@ -98,7 +91,6 @@ class EndpointFilterCRUDTestCase(TestExtensionCase):
"""
self.put(self.default_request_url,
- body='',
expected_status=204)
self.head('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
@@ -117,7 +109,6 @@ class EndpointFilterCRUDTestCase(TestExtensionCase):
'/endpoints/%(endpoint_id)s' % {
'project_id': uuid.uuid4().hex,
'endpoint_id': self.endpoint_id},
- body='',
expected_status=404)
def test_check_endpoint_project_association_with_invalid_endpoint(self):
@@ -131,7 +122,6 @@ class EndpointFilterCRUDTestCase(TestExtensionCase):
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': uuid.uuid4().hex},
- body='',
expected_status=404)
def test_list_endpoints_associated_with_valid_project(self):
@@ -156,7 +146,6 @@ class EndpointFilterCRUDTestCase(TestExtensionCase):
self.put(self.default_request_url)
self.get('/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
'project_id': uuid.uuid4().hex},
- body='',
expected_status=404)
def test_list_projects_associated_with_endpoint(self):
@@ -217,7 +206,6 @@ class EndpointFilterCRUDTestCase(TestExtensionCase):
'/endpoints/%(endpoint_id)s' % {
'project_id': uuid.uuid4().hex,
'endpoint_id': self.endpoint_id},
- body='',
expected_status=404)
def test_remove_endpoint_project_association_with_invalid_endpoint(self):
@@ -231,7 +219,6 @@ class EndpointFilterCRUDTestCase(TestExtensionCase):
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': uuid.uuid4().hex},
- body='',
expected_status=404)
def test_endpoint_project_association_cleanup_when_project_deleted(self):
@@ -289,7 +276,6 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
'/endpoints/%(endpoint_id)s' % {
'project_id': project['id'],
'endpoint_id': self.endpoint_id},
- body='',
expected_status=204)
# attempt to authenticate without requesting a project
@@ -311,7 +297,6 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id},
- body='',
expected_status=204)
auth_data = self.build_authentication_request(
@@ -327,65 +312,12 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
self.assertEqual(r.result['token']['project']['id'],
self.project['id'])
- def test_project_scoped_token_with_no_catalog_using_endpoint_filter(self):
- """Verify endpoint filter when project scoped token returns no catalog.
-
- Test that the project scoped token response is valid for a given
- endpoint-project association when no service catalog is returned.
-
- """
- # create a project to work with
- ref = self.new_project_ref(domain_id=self.domain_id)
- r = self.post('/projects', body={'project': ref})
- project = self.assertValidProjectResponse(r, ref)
-
- # grant the user a role on the project
- self.put(
- '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
- 'user_id': self.user['id'],
- 'project_id': project['id'],
- 'role_id': self.role['id']})
-
- # set the user's preferred project
- body = {'user': {'default_project_id': project['id']}}
- r = self.patch('/users/%(user_id)s' % {
- 'user_id': self.user['id']},
- body=body)
- self.assertValidUserResponse(r)
-
- # add one endpoint to the project
- self.put('/OS-EP-FILTER/projects/%(project_id)s'
- '/endpoints/%(endpoint_id)s' % {
- 'project_id': project['id'],
- 'endpoint_id': self.endpoint_id},
- body='',
- expected_status=204)
-
- # attempt to authenticate without requesting a project
- auth_data = self.build_authentication_request(
- user_id=self.user['id'],
- password=self.user['password'])
- r = self.post('/auth/tokens?nocatalog', body=auth_data)
- self.assertValidProjectScopedTokenResponse(
- r,
- require_catalog=False,
- endpoint_filter=True,
- ep_filter_assoc=1)
- self.assertEqual(r.result['token']['project']['id'], project['id'])
-
- def test_default_scoped_token_with_no_catalog_using_endpoint_filter(self):
- """Verify endpoint filter when default scoped token returns no catalog.
-
- Test that the default project scoped token response is valid for a
- given endpoint-project association when no service catalog is returned.
-
- """
- # add one endpoint to default project
+ def test_scoped_token_with_no_catalog_using_endpoint_filter(self):
+ """Verify endpoint filter does not affect no catalog."""
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id},
- body='',
expected_status=204)
auth_data = self.build_authentication_request(
@@ -395,65 +327,7 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
r = self.post('/auth/tokens?nocatalog', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
- require_catalog=False,
- endpoint_filter=True,
- ep_filter_assoc=1)
- self.assertEqual(r.result['token']['project']['id'],
- self.project['id'])
-
- def test_project_scoped_token_with_no_endpoint_project_association(self):
- """Verify endpoint filter when no endpoint-project association.
-
- Test that the project scoped token response is valid when there are
- no endpoint-project associations defined.
-
- """
- # create a project to work with
- ref = self.new_project_ref(domain_id=self.domain_id)
- r = self.post('/projects', body={'project': ref})
- project = self.assertValidProjectResponse(r, ref)
-
- # grant the user a role on the project
- self.put(
- '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
- 'user_id': self.user['id'],
- 'project_id': project['id'],
- 'role_id': self.role['id']})
-
- # set the user's preferred project
- body = {'user': {'default_project_id': project['id']}}
- r = self.patch('/users/%(user_id)s' % {
- 'user_id': self.user['id']},
- body=body)
- self.assertValidUserResponse(r)
-
- # attempt to authenticate without requesting a project
- auth_data = self.build_authentication_request(
- user_id=self.user['id'],
- password=self.user['password'])
- r = self.post('/auth/tokens?nocatalog', body=auth_data)
- self.assertValidProjectScopedTokenResponse(
- r,
- require_catalog=False,
- endpoint_filter=True)
- self.assertEqual(r.result['token']['project']['id'], project['id'])
-
- def test_default_scoped_token_with_no_endpoint_project_association(self):
- """Verify endpoint filter when no endpoint-project association.
-
- Test that the default project scoped token response is valid when
- there are no endpoint-project associations defined.
-
- """
- auth_data = self.build_authentication_request(
- user_id=self.user['id'],
- password=self.user['password'],
- project_id=self.project['id'])
- r = self.post('/auth/tokens?nocatalog', body=auth_data)
- self.assertValidProjectScopedTokenResponse(
- r,
- require_catalog=False,
- endpoint_filter=True,)
+ require_catalog=False)
self.assertEqual(r.result['token']['project']['id'],
self.project['id'])
@@ -464,7 +338,6 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id},
- body='',
expected_status=204)
# create a second temporary endpoint
@@ -480,7 +353,6 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id2},
- body='',
expected_status=204)
# remove the temporary reference
@@ -576,6 +448,30 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
endpoint_filter=True,
ep_filter_assoc=2)
+ def test_get_auth_catalog_using_endpoint_filter(self):
+ # add one endpoint to default project
+ self.put('/OS-EP-FILTER/projects/%(project_id)s'
+ '/endpoints/%(endpoint_id)s' % {
+ 'project_id': self.project['id'],
+ 'endpoint_id': self.endpoint_id},
+ expected_status=204)
+
+ auth_data = self.build_authentication_request(
+ user_id=self.user['id'],
+ password=self.user['password'],
+ project_id=self.project['id'])
+ token_data = self.post('/auth/tokens', body=auth_data)
+ self.assertValidProjectScopedTokenResponse(
+ token_data,
+ require_catalog=True,
+ endpoint_filter=True,
+ ep_filter_assoc=1)
+
+ auth_catalog = self.get('/auth/catalog',
+ token=token_data.headers['X-Subject-Token'])
+ self.assertEqual(token_data.result['token']['catalog'],
+ auth_catalog.result['catalog'])
+
class JsonHomeTests(TestExtensionCase, test_v3.JsonHomeTestMixin):
JSON_HOME_DATA = {
@@ -635,6 +531,16 @@ class JsonHomeTests(TestExtensionCase, test_v3.JsonHomeTestMixin):
'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
},
},
+ 'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
+ '1.0/rel/project_endpoint_groups': {
+ 'href-template': '/OS-EP-FILTER/projects/{project_id}/'
+ 'endpoint_groups',
+ 'href-vars': {
+ 'project_id':
+ 'http://docs.openstack.org/api/openstack-identity/3/param/'
+ 'project_id',
+ },
+ },
}
@@ -883,6 +789,40 @@ class EndpointGroupCRUDTestCase(TestExtensionCase):
endpoint_group_id, project_id)
self.get(url, expected_status=404)
+ def test_list_endpoint_groups_in_project(self):
+ """GET /OS-EP-FILTER/projects/{project_id}/endpoint_groups."""
+ # create an endpoint group to work with
+ endpoint_group_id = self._create_valid_endpoint_group(
+ self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
+
+ # associate endpoint group with project
+ url = self._get_project_endpoint_group_url(
+ endpoint_group_id, self.project_id)
+ self.put(url)
+
+ url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoint_groups' %
+ {'project_id': self.project_id})
+ response = self.get(url)
+
+ self.assertEqual(
+ endpoint_group_id,
+ response.result['endpoint_groups'][0]['id'])
+
+ def test_list_endpoint_groups_in_invalid_project(self):
+ """Test retrieving from invalid project."""
+ project_id = uuid.uuid4().hex
+ url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoint_groups' %
+ {'project_id': project_id})
+ self.get(url, expected_status=404)
+
+ def test_empty_endpoint_groups_in_project(self):
+ """Test when no endpoint groups associated with the project."""
+ url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoint_groups' %
+ {'project_id': self.project_id})
+ response = self.get(url)
+
+ self.assertEqual(0, len(response.result['endpoint_groups']))
+
def test_check_endpoint_group_to_project(self):
"""Test HEAD with a valid endpoint group and project association."""
endpoint_group_id = self._create_valid_endpoint_group(
@@ -1088,6 +1028,25 @@ class EndpointGroupCRUDTestCase(TestExtensionCase):
self.delete(url)
self.get(url, expected_status=404)
+ def test_remove_endpoint_group_with_project_association(self):
+ # create an endpoint group
+ endpoint_group_id = self._create_valid_endpoint_group(
+ self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
+
+ # create an endpoint_group project
+ project_endpoint_group_url = self._get_project_endpoint_group_url(
+ endpoint_group_id, self.default_domain_project_id)
+ self.put(project_endpoint_group_url)
+
+ # remove endpoint group, the associated endpoint_group project will
+ # be removed as well.
+ endpoint_group_url = ('/OS-EP-FILTER/endpoint_groups/'
+ '%(endpoint_group_id)s'
+ % {'endpoint_group_id': endpoint_group_id})
+ self.delete(endpoint_group_url)
+ self.get(endpoint_group_url, expected_status=404)
+ self.get(project_endpoint_group_url, expected_status=404)
+
def _create_valid_endpoint_group(self, url, body):
r = self.post(url, body=body)
return r.result['endpoint_group']['id']
diff --git a/keystone-moon/keystone/tests/unit/test_auth.py b/keystone-moon/keystone/tests/unit/test_auth.py
index 295e028d..f253b02d 100644
--- a/keystone-moon/keystone/tests/unit/test_auth.py
+++ b/keystone-moon/keystone/tests/unit/test_auth.py
@@ -18,7 +18,9 @@ import uuid
import mock
from oslo_config import cfg
+import oslo_utils.fixture
from oslo_utils import timeutils
+import six
from testtools import matchers
from keystone import assignment
@@ -74,6 +76,7 @@ class AuthTest(tests.TestCase):
def setUp(self):
self.useFixture(database.Database())
super(AuthTest, self).setUp()
+ self.time_fixture = self.useFixture(oslo_utils.fixture.TimeFixture())
self.load_backends()
self.load_fixtures(default_fixtures)
@@ -265,12 +268,12 @@ class AuthWithToken(AuthTest):
self.user_foo['id'],
self.tenant_bar['id'],
self.role_member['id'])
- # Get an unscoped tenant
+ # Get an unscoped token
body_dict = _build_user_auth(
username='FOO',
password='foo2')
unscoped_token = self.controller.authenticate({}, body_dict)
- # Get a token on BAR tenant using the unscoped tenant
+ # Get a token on BAR tenant using the unscoped token
body_dict = _build_user_auth(
token=unscoped_token["access"]["token"],
tenant_name="BAR")
@@ -281,6 +284,50 @@ class AuthWithToken(AuthTest):
self.assertEqual(self.tenant_bar['id'], tenant["id"])
self.assertThat(roles, matchers.Contains(self.role_member['id']))
+ def test_auth_scoped_token_bad_project_with_debug(self):
+ """Authenticating with an invalid project fails."""
+ # Bug 1379952 reports poor user feedback, even in debug mode,
+ # when the user accidentally passes a project name as an ID.
+ # This test intentionally does exactly that.
+ body_dict = _build_user_auth(
+ username=self.user_foo['name'],
+ password=self.user_foo['password'],
+ tenant_id=self.tenant_bar['name'])
+
+ # with debug enabled, this produces a friendly exception.
+ self.config_fixture.config(debug=True)
+ e = self.assertRaises(
+ exception.Unauthorized,
+ self.controller.authenticate,
+ {}, body_dict)
+ # explicitly verify that the error message shows that a *name* is
+ # found where an *ID* is expected
+ self.assertIn(
+ 'Project ID not found: %s' % self.tenant_bar['name'],
+ six.text_type(e))
+
+ def test_auth_scoped_token_bad_project_without_debug(self):
+ """Authenticating with an invalid project fails."""
+ # Bug 1379952 reports poor user feedback, even in debug mode,
+ # when the user accidentally passes a project name as an ID.
+ # This test intentionally does exactly that.
+ body_dict = _build_user_auth(
+ username=self.user_foo['name'],
+ password=self.user_foo['password'],
+ tenant_id=self.tenant_bar['name'])
+
+ # with debug disabled, authentication failure details are suppressed.
+ self.config_fixture.config(debug=False)
+ e = self.assertRaises(
+ exception.Unauthorized,
+ self.controller.authenticate,
+ {}, body_dict)
+ # explicitly verify that the error message details above have been
+ # suppressed.
+ self.assertNotIn(
+ 'Project ID not found: %s' % self.tenant_bar['name'],
+ six.text_type(e))
+
def test_auth_token_project_group_role(self):
"""Verify getting a token in a tenant with group roles."""
# Add a v2 style role in so we can check we get this back
@@ -448,10 +495,13 @@ class AuthWithToken(AuthTest):
body_dict = _build_user_auth(username='FOO', password='foo2')
unscoped_token = self.controller.authenticate(context, body_dict)
token_id = unscoped_token['access']['token']['id']
+ self.time_fixture.advance_time_seconds(1)
+
# get a second token
body_dict = _build_user_auth(token=unscoped_token["access"]["token"])
unscoped_token_2 = self.controller.authenticate(context, body_dict)
token_2_id = unscoped_token_2['access']['token']['id']
+ self.time_fixture.advance_time_seconds(1)
self.token_provider_api.revoke_token(token_id, revoke_chain=True)
@@ -470,10 +520,13 @@ class AuthWithToken(AuthTest):
body_dict = _build_user_auth(username='FOO', password='foo2')
unscoped_token = self.controller.authenticate(context, body_dict)
token_id = unscoped_token['access']['token']['id']
+ self.time_fixture.advance_time_seconds(1)
+
# get a second token
body_dict = _build_user_auth(token=unscoped_token["access"]["token"])
unscoped_token_2 = self.controller.authenticate(context, body_dict)
token_2_id = unscoped_token_2['access']['token']['id']
+ self.time_fixture.advance_time_seconds(1)
self.token_provider_api.revoke_token(token_2_id, revoke_chain=True)
@@ -500,13 +553,17 @@ class AuthWithToken(AuthTest):
body_dict = _build_user_auth(username='FOO', password='foo2')
unscoped_token = self.controller.authenticate(context, body_dict)
token_id = unscoped_token['access']['token']['id']
+ self.time_fixture.advance_time_seconds(1)
+
# get a second token
body_dict = _build_user_auth(
token=unscoped_token['access']['token'])
unscoped_token_2 = self.controller.authenticate(context, body_dict)
token_2_id = unscoped_token_2['access']['token']['id']
+ self.time_fixture.advance_time_seconds(1)
self.token_provider_api.revoke_token(token_id, revoke_chain=True)
+ self.time_fixture.advance_time_seconds(1)
revoke_events = self.revoke_api.list_events()
self.assertThat(revoke_events, matchers.HasLength(1))
@@ -526,15 +583,18 @@ class AuthWithToken(AuthTest):
body_dict = _build_user_auth(username='FOO', password='foo2')
unscoped_token = self.controller.authenticate(context, body_dict)
token_id = unscoped_token['access']['token']['id']
+ self.time_fixture.advance_time_seconds(1)
# get a second token
body_dict = _build_user_auth(
token=unscoped_token['access']['token'])
unscoped_token_2 = self.controller.authenticate(context, body_dict)
token_2_id = unscoped_token_2['access']['token']['id']
+ self.time_fixture.advance_time_seconds(1)
# Revoke by audit_id, no audit_info means both parent and child
# token are revoked.
self.token_provider_api.revoke_token(token_id)
+ self.time_fixture.advance_time_seconds(1)
revoke_events = self.revoke_api.list_events()
self.assertThat(revoke_events, matchers.HasLength(2))
@@ -819,9 +879,8 @@ class AuthWithTrust(AuthTest):
context, trust=self.sample_data)
def test_create_trust(self):
- expires_at = timeutils.strtime(timeutils.utcnow() +
- datetime.timedelta(minutes=10),
- fmt=TIME_FORMAT)
+ expires_at = (timeutils.utcnow() +
+ datetime.timedelta(minutes=10)).strftime(TIME_FORMAT)
new_trust = self.create_trust(self.sample_data, self.trustor['name'],
expires_at=expires_at)
self.assertEqual(self.trustor['id'], new_trust['trustor_user_id'])
@@ -848,6 +907,12 @@ class AuthWithTrust(AuthTest):
self.create_trust, self.sample_data,
self.trustor['name'], expires_at="Z")
+ def test_create_trust_expires_older_than_now(self):
+ self.assertRaises(exception.ValidationExpirationError,
+ self.create_trust, self.sample_data,
+ self.trustor['name'],
+ expires_at="2010-06-04T08:44:31.999999Z")
+
def test_create_trust_without_project_id(self):
"""Verify that trust can be created without project id and
token can be generated with that trust.
@@ -868,8 +933,8 @@ class AuthWithTrust(AuthTest):
def test_get_trust(self):
unscoped_token = self.get_unscoped_token(self.trustor['name'])
- context = {'token_id': unscoped_token['access']['token']['id'],
- 'host_url': HOST_URL}
+ context = self._create_auth_context(
+ unscoped_token['access']['token']['id'])
new_trust = self.trust_controller.create_trust(
context, trust=self.sample_data)['trust']
trust = self.trust_controller.get_trust(context,
@@ -880,6 +945,21 @@ class AuthWithTrust(AuthTest):
for role in new_trust['roles']:
self.assertIn(role['id'], role_ids)
+ def test_get_trust_without_auth_context(self):
+ """Verify that a trust cannot be retrieved when the auth context is
+ missing.
+ """
+ unscoped_token = self.get_unscoped_token(self.trustor['name'])
+ context = self._create_auth_context(
+ unscoped_token['access']['token']['id'])
+ new_trust = self.trust_controller.create_trust(
+ context, trust=self.sample_data)['trust']
+ # Delete the auth context before calling get_trust().
+ del context['environment'][authorization.AUTH_CONTEXT_ENV]
+ self.assertRaises(exception.Forbidden,
+ self.trust_controller.get_trust, context,
+ new_trust['id'])
+
def test_create_trust_no_impersonation(self):
new_trust = self.create_trust(self.sample_data, self.trustor['name'],
expires_at=None, impersonation=False)
@@ -1051,13 +1131,18 @@ class AuthWithTrust(AuthTest):
self.controller.authenticate, {}, request_body)
def test_expired_trust_get_token_fails(self):
- expiry = "1999-02-18T10:10:00Z"
+ expires_at = (timeutils.utcnow() +
+ datetime.timedelta(minutes=5)).strftime(TIME_FORMAT)
+ time_expired = timeutils.utcnow() + datetime.timedelta(minutes=10)
new_trust = self.create_trust(self.sample_data, self.trustor['name'],
- expiry)
- request_body = self.build_v2_token_request('TWO', 'two2', new_trust)
- self.assertRaises(
- exception.Forbidden,
- self.controller.authenticate, {}, request_body)
+ expires_at)
+ with mock.patch.object(timeutils, 'utcnow') as mock_now:
+ mock_now.return_value = time_expired
+ request_body = self.build_v2_token_request('TWO', 'two2',
+ new_trust)
+ self.assertRaises(
+ exception.Forbidden,
+ self.controller.authenticate, {}, request_body)
def test_token_from_trust_with_wrong_role_fails(self):
new_trust = self.create_trust(self.sample_data, self.trustor['name'])
@@ -1196,9 +1281,7 @@ class TokenExpirationTest(AuthTest):
self.assertEqual(original_expiration, r['access']['token']['expires'])
def test_maintain_uuid_token_expiration(self):
- self.config_fixture.config(
- group='token',
- provider='keystone.token.providers.uuid.Provider')
+ self.config_fixture.config(group='token', provider='uuid')
self._maintain_token_expiration()
diff --git a/keystone-moon/keystone/tests/unit/test_auth_plugin.py b/keystone-moon/keystone/tests/unit/test_auth_plugin.py
index 11df95a5..a259cc2a 100644
--- a/keystone-moon/keystone/tests/unit/test_auth_plugin.py
+++ b/keystone-moon/keystone/tests/unit/test_auth_plugin.py
@@ -28,9 +28,6 @@ DEMO_USER_ID = uuid.uuid4().hex
class SimpleChallengeResponse(auth.AuthMethodHandler):
-
- method = METHOD_NAME
-
def authenticate(self, context, auth_payload, user_context):
if 'response' in auth_payload:
if auth_payload['response'] != EXPECTED_RESPONSE:
@@ -40,20 +37,6 @@ class SimpleChallengeResponse(auth.AuthMethodHandler):
return {"challenge": "What's the name of your high school?"}
-class DuplicateAuthPlugin(SimpleChallengeResponse):
- """Duplicate simple challenge response auth plugin."""
-
-
-class MismatchedAuthPlugin(SimpleChallengeResponse):
- method = uuid.uuid4().hex
-
-
-class NoMethodAuthPlugin(auth.AuthMethodHandler):
- """An auth plugin that does not supply a method attribute."""
- def authenticate(self, context, auth_payload, auth_context):
- pass
-
-
class TestAuthPlugin(tests.SQLDriverOverrides, tests.TestCase):
def setUp(self):
super(TestAuthPlugin, self).setUp()
@@ -64,9 +47,6 @@ class TestAuthPlugin(tests.SQLDriverOverrides, tests.TestCase):
def config_overrides(self):
super(TestAuthPlugin, self).config_overrides()
method_opts = {
- 'external': 'keystone.auth.plugins.external.DefaultDomain',
- 'password': 'keystone.auth.plugins.password.Password',
- 'token': 'keystone.auth.plugins.token.Token',
METHOD_NAME:
'keystone.tests.unit.test_auth_plugin.SimpleChallengeResponse',
}
@@ -123,6 +103,14 @@ class TestAuthPlugin(tests.SQLDriverOverrides, tests.TestCase):
auth_info,
auth_context)
+ def test_duplicate_method(self):
+ # Having the same method twice doesn't cause load_auth_methods to fail.
+ self.auth_plugin_config_override(
+ methods=['external', 'external'])
+ self.clear_auth_plugin_registry()
+ auth.controllers.load_auth_methods()
+ self.assertIn('external', auth.controllers.AUTH_METHODS)
+
class TestAuthPluginDynamicOptions(TestAuthPlugin):
def config_overrides(self):
@@ -137,25 +125,6 @@ class TestAuthPluginDynamicOptions(TestAuthPlugin):
return config_files
-class TestInvalidAuthMethodRegistration(tests.TestCase):
- def test_duplicate_auth_method_registration(self):
- self.config_fixture.config(
- group='auth',
- methods=[
- 'keystone.tests.unit.test_auth_plugin.SimpleChallengeResponse',
- 'keystone.tests.unit.test_auth_plugin.DuplicateAuthPlugin'])
- self.clear_auth_plugin_registry()
- self.assertRaises(ValueError, auth.controllers.load_auth_methods)
-
- def test_no_method_attribute_auth_method_by_class_name_registration(self):
- self.config_fixture.config(
- group='auth',
- methods=['keystone.tests.unit.test_auth_plugin.NoMethodAuthPlugin']
- )
- self.clear_auth_plugin_registry()
- self.assertRaises(ValueError, auth.controllers.load_auth_methods)
-
-
class TestMapped(tests.TestCase):
def setUp(self):
super(TestMapped, self).setUp()
@@ -168,8 +137,9 @@ class TestMapped(tests.TestCase):
config_files.append(tests.dirs.tests_conf('test_auth_plugin.conf'))
return config_files
- def config_overrides(self):
- # don't override configs so we can use test_auth_plugin.conf only
+ def auth_plugin_config_override(self, methods=None, **method_classes):
+ # Do not apply the auth plugin overrides so that the config file is
+ # tested
pass
def _test_mapped_invocation_with_method_name(self, method_name):
diff --git a/keystone-moon/keystone/tests/unit/test_backend.py b/keystone-moon/keystone/tests/unit/test_backend.py
index 6cf06494..45b8e0b0 100644
--- a/keystone-moon/keystone/tests/unit/test_backend.py
+++ b/keystone-moon/keystone/tests/unit/test_backend.py
@@ -22,6 +22,7 @@ import mock
from oslo_config import cfg
from oslo_utils import timeutils
import six
+from six.moves import range
from testtools import matchers
from keystone.catalog import core
@@ -505,7 +506,7 @@ class IdentityTests(object):
'fake2')
def test_list_role_assignments_unfiltered(self):
- """Test for unfiltered listing role assignments.
+ """Test unfiltered listing of role assignments.
Test Plan:
@@ -533,9 +534,6 @@ class IdentityTests(object):
# First check how many role grants already exist
existing_assignments = len(self.assignment_api.list_role_assignments())
- existing_assignments_for_role = len(
- self.assignment_api.list_role_assignments_for_role(
- role_id='admin'))
# Now create the grants (roles are defined in default_fixtures)
self.assignment_api.create_grant(user_id=new_user['id'],
@@ -573,6 +571,48 @@ class IdentityTests(object):
'role_id': 'admin'},
assignment_list)
+ def test_list_role_assignments_filtered_by_role(self):
+ """Test listing of role assignments filtered by role ID.
+
+ Test Plan:
+
+ - Create a user, group & project
+ - Find how many role assignments already exist (from default
+ fixtures)
+ - Create a grant of each type (user/group on project/domain)
+ - Check that if we list assignments by role_id, then we get back
+ assignments that only contain that role.
+
+ """
+ new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+ 'enabled': True, 'domain_id': DEFAULT_DOMAIN_ID}
+ new_user = self.identity_api.create_user(new_user)
+ new_group = {'domain_id': DEFAULT_DOMAIN_ID, 'name': uuid.uuid4().hex}
+ new_group = self.identity_api.create_group(new_group)
+ new_project = {'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'domain_id': DEFAULT_DOMAIN_ID}
+ self.resource_api.create_project(new_project['id'], new_project)
+
+ # First check how many role grants already exist
+ existing_assignments_for_role = len(
+ self.assignment_api.list_role_assignments_for_role(
+ role_id='admin'))
+
+ # Now create the grants (roles are defined in default_fixtures)
+ self.assignment_api.create_grant(user_id=new_user['id'],
+ domain_id=DEFAULT_DOMAIN_ID,
+ role_id='member')
+ self.assignment_api.create_grant(user_id=new_user['id'],
+ project_id=new_project['id'],
+ role_id='other')
+ self.assignment_api.create_grant(group_id=new_group['id'],
+ domain_id=DEFAULT_DOMAIN_ID,
+ role_id='admin')
+ self.assignment_api.create_grant(group_id=new_group['id'],
+ project_id=new_project['id'],
+ role_id='admin')
+
# Read back the list of assignments for just the admin role, checking
# this only goes up by two.
assignment_list = self.assignment_api.list_role_assignments_for_role(
@@ -582,7 +622,7 @@ class IdentityTests(object):
# Now check that each of our two new entries are in the list
self.assertIn(
- {'group_id': new_group['id'], 'domain_id': new_domain['id'],
+ {'group_id': new_group['id'], 'domain_id': DEFAULT_DOMAIN_ID,
'role_id': 'admin'},
assignment_list)
self.assertIn(
@@ -598,8 +638,7 @@ class IdentityTests(object):
def get_member_assignments():
assignments = self.assignment_api.list_role_assignments()
- return filter(lambda x: x['role_id'] == MEMBER_ROLE_ID,
- assignments)
+ return [x for x in assignments if x['role_id'] == MEMBER_ROLE_ID]
orig_member_assignments = get_member_assignments()
@@ -627,8 +666,8 @@ class IdentityTests(object):
expected_member_assignments = orig_member_assignments + [{
'group_id': new_group['id'], 'project_id': new_project['id'],
'role_id': MEMBER_ROLE_ID}]
- self.assertThat(new_member_assignments,
- matchers.Equals(expected_member_assignments))
+ self.assertItemsEqual(expected_member_assignments,
+ new_member_assignments)
def test_list_role_assignments_bad_role(self):
assignment_list = self.assignment_api.list_role_assignments_for_role(
@@ -1976,6 +2015,16 @@ class IdentityTests(object):
project['id'],
project)
+ def test_create_project_invalid_domain_id(self):
+ project = {'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'domain_id': uuid.uuid4().hex,
+ 'enabled': True}
+ self.assertRaises(exception.DomainNotFound,
+ self.resource_api.create_project,
+ project['id'],
+ project)
+
def test_create_user_invalid_enabled_type_string(self):
user = {'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
@@ -2079,7 +2128,7 @@ class IdentityTests(object):
# Create a project
project = {'id': uuid.uuid4().hex, 'domain_id': DEFAULT_DOMAIN_ID,
'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex,
- 'enabled': True, 'parent_id': None}
+ 'enabled': True, 'parent_id': None, 'is_domain': False}
self.resource_api.create_project(project['id'], project)
# Build driver hints with the project's name and inexistent description
@@ -2131,12 +2180,15 @@ class IdentityTests(object):
self.assertIn(project2['id'], project_ids)
def _create_projects_hierarchy(self, hierarchy_size=2,
- domain_id=DEFAULT_DOMAIN_ID):
+ domain_id=DEFAULT_DOMAIN_ID,
+ is_domain=False):
"""Creates a project hierarchy with specified size.
:param hierarchy_size: the desired hierarchy size, default is 2 -
a project with one child.
:param domain_id: domain where the projects hierarchy will be created.
+ :param is_domain: if the hierarchy will have the is_domain flag active
+ or not.
:returns projects: a list of the projects in the created hierarchy.
@@ -2144,26 +2196,195 @@ class IdentityTests(object):
project_id = uuid.uuid4().hex
project = {'id': project_id,
'description': '',
- 'domain_id': domain_id,
'enabled': True,
'name': uuid.uuid4().hex,
- 'parent_id': None}
+ 'parent_id': None,
+ 'domain_id': domain_id,
+ 'is_domain': is_domain}
self.resource_api.create_project(project_id, project)
projects = [project]
for i in range(1, hierarchy_size):
new_project = {'id': uuid.uuid4().hex,
'description': '',
- 'domain_id': domain_id,
'enabled': True,
'name': uuid.uuid4().hex,
- 'parent_id': project_id}
+ 'parent_id': project_id,
+ 'is_domain': is_domain}
+ new_project['domain_id'] = domain_id
+
self.resource_api.create_project(new_project['id'], new_project)
projects.append(new_project)
project_id = new_project['id']
return projects
+ @tests.skip_if_no_multiple_domains_support
+ def test_create_domain_with_project_api(self):
+ project_id = uuid.uuid4().hex
+ project = {'id': project_id,
+ 'description': '',
+ 'domain_id': DEFAULT_DOMAIN_ID,
+ 'enabled': True,
+ 'name': uuid.uuid4().hex,
+ 'parent_id': None,
+ 'is_domain': True}
+ ref = self.resource_api.create_project(project['id'], project)
+ self.assertTrue(ref['is_domain'])
+ self.assertEqual(DEFAULT_DOMAIN_ID, ref['domain_id'])
+
+ @tests.skip_if_no_multiple_domains_support
+ @test_utils.wip('waiting for projects acting as domains implementation')
+ def test_is_domain_sub_project_has_parent_domain_id(self):
+ project = {'id': uuid.uuid4().hex,
+ 'description': '',
+ 'domain_id': DEFAULT_DOMAIN_ID,
+ 'enabled': True,
+ 'name': uuid.uuid4().hex,
+ 'parent_id': None,
+ 'is_domain': True}
+ self.resource_api.create_project(project['id'], project)
+
+ sub_project_id = uuid.uuid4().hex
+ sub_project = {'id': sub_project_id,
+ 'description': '',
+ 'domain_id': project['id'],
+ 'enabled': True,
+ 'name': uuid.uuid4().hex,
+ 'parent_id': project['id'],
+ 'is_domain': True}
+ ref = self.resource_api.create_project(sub_project['id'], sub_project)
+ self.assertTrue(ref['is_domain'])
+ self.assertEqual(project['id'], ref['parent_id'])
+ self.assertEqual(project['id'], ref['domain_id'])
+
+ @tests.skip_if_no_multiple_domains_support
+ @test_utils.wip('waiting for projects acting as domains implementation')
+ def test_delete_domain_with_project_api(self):
+ project_id = uuid.uuid4().hex
+ project = {'id': project_id,
+ 'description': '',
+ 'domain_id': None,
+ 'enabled': True,
+ 'name': uuid.uuid4().hex,
+ 'parent_id': None,
+ 'is_domain': True}
+ self.resource_api.create_project(project['id'], project)
+
+ # Try to delete is_domain project that is enabled
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.delete_project,
+ project['id'])
+
+ # Disable the project
+ project['enabled'] = False
+ self.resource_api.update_project(project['id'], project)
+
+ # Successfuly delete the project
+ self.resource_api.delete_project(project['id'])
+
+ @tests.skip_if_no_multiple_domains_support
+ @test_utils.wip('waiting for projects acting as domains implementation')
+ def test_create_domain_under_regular_project_hierarchy_fails(self):
+ # Creating a regular project hierarchy. Projects acting as domains
+ # can't have a parent that is a regular project.
+ projects_hierarchy = self._create_projects_hierarchy()
+ parent = projects_hierarchy[1]
+ project_id = uuid.uuid4().hex
+ project = {'id': project_id,
+ 'description': '',
+ 'domain_id': parent['id'],
+ 'enabled': True,
+ 'name': uuid.uuid4().hex,
+ 'parent_id': parent['id'],
+ 'is_domain': True}
+
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.create_project,
+ project['id'], project)
+
+ @tests.skip_if_no_multiple_domains_support
+ @test_utils.wip('waiting for projects acting as domains implementation')
+ def test_create_project_under_domain_hierarchy(self):
+ projects_hierarchy = self._create_projects_hierarchy(is_domain=True)
+ parent = projects_hierarchy[1]
+ project = {'id': uuid.uuid4().hex,
+ 'description': '',
+ 'domain_id': parent['id'],
+ 'enabled': True,
+ 'name': uuid.uuid4().hex,
+ 'parent_id': parent['id'],
+ 'is_domain': False}
+
+ ref = self.resource_api.create_project(project['id'], project)
+ self.assertFalse(ref['is_domain'])
+ self.assertEqual(parent['id'], ref['parent_id'])
+ self.assertEqual(parent['id'], ref['domain_id'])
+
+ def test_create_project_without_is_domain_flag(self):
+ project = {'id': uuid.uuid4().hex,
+ 'description': '',
+ 'domain_id': DEFAULT_DOMAIN_ID,
+ 'enabled': True,
+ 'name': uuid.uuid4().hex,
+ 'parent_id': None}
+
+ ref = self.resource_api.create_project(project['id'], project)
+ # The is_domain flag should be False by default
+ self.assertFalse(ref['is_domain'])
+
+ def test_create_is_domain_project(self):
+ project = {'id': uuid.uuid4().hex,
+ 'description': '',
+ 'domain_id': DEFAULT_DOMAIN_ID,
+ 'enabled': True,
+ 'name': uuid.uuid4().hex,
+ 'parent_id': None,
+ 'is_domain': True}
+
+ ref = self.resource_api.create_project(project['id'], project)
+ self.assertTrue(ref['is_domain'])
+
+ @test_utils.wip('waiting for projects acting as domains implementation')
+ def test_create_project_with_parent_id_and_without_domain_id(self):
+ project = {'id': uuid.uuid4().hex,
+ 'description': '',
+ 'domain_id': None,
+ 'enabled': True,
+ 'name': uuid.uuid4().hex,
+ 'parent_id': None}
+ self.resource_api.create_project(project['id'], project)
+
+ sub_project = {'id': uuid.uuid4().hex,
+ 'description': '',
+ 'enabled': True,
+ 'name': uuid.uuid4().hex,
+ 'parent_id': project['id']}
+ ref = self.resource_api.create_project(sub_project['id'], sub_project)
+
+ # The domain_id should be set to the parent domain_id
+ self.assertEqual(project['domain_id'], ref['domain_id'])
+
+ @test_utils.wip('waiting for projects acting as domains implementation')
+ def test_create_project_with_domain_id_and_without_parent_id(self):
+ project = {'id': uuid.uuid4().hex,
+ 'description': '',
+ 'domain_id': None,
+ 'enabled': True,
+ 'name': uuid.uuid4().hex,
+ 'parent_id': None}
+ self.resource_api.create_project(project['id'], project)
+
+ sub_project = {'id': uuid.uuid4().hex,
+ 'description': '',
+ 'enabled': True,
+ 'domain_id': project['id'],
+ 'name': uuid.uuid4().hex}
+ ref = self.resource_api.create_project(sub_project['id'], sub_project)
+
+ # The parent_id should be set to the domain_id
+ self.assertEqual(ref['parent_id'], project['id'])
+
def test_check_leaf_projects(self):
projects_hierarchy = self._create_projects_hierarchy()
root_project = projects_hierarchy[0]
@@ -2191,7 +2412,8 @@ class IdentityTests(object):
'domain_id': DEFAULT_DOMAIN_ID,
'enabled': True,
'name': uuid.uuid4().hex,
- 'parent_id': project2['id']}
+ 'parent_id': project2['id'],
+ 'is_domain': False}
self.resource_api.create_project(project4['id'], project4)
subtree = self.resource_api.list_projects_in_subtree(project1['id'])
@@ -2208,6 +2430,48 @@ class IdentityTests(object):
subtree = self.resource_api.list_projects_in_subtree(project3['id'])
self.assertEqual(0, len(subtree))
+ def test_list_projects_in_subtree_with_circular_reference(self):
+ project1_id = uuid.uuid4().hex
+ project2_id = uuid.uuid4().hex
+
+ project1 = {'id': project1_id,
+ 'description': '',
+ 'domain_id': DEFAULT_DOMAIN_ID,
+ 'enabled': True,
+ 'name': uuid.uuid4().hex}
+ self.resource_api.create_project(project1['id'], project1)
+
+ project2 = {'id': project2_id,
+ 'description': '',
+ 'domain_id': DEFAULT_DOMAIN_ID,
+ 'enabled': True,
+ 'name': uuid.uuid4().hex,
+ 'parent_id': project1_id}
+ self.resource_api.create_project(project2['id'], project2)
+
+ project1['parent_id'] = project2_id # Adds cyclic reference
+
+ # NOTE(dstanek): The manager does not allow parent_id to be updated.
+ # Instead will directly use the driver to create the cyclic
+ # reference.
+ self.resource_api.driver.update_project(project1_id, project1)
+
+ subtree = self.resource_api.list_projects_in_subtree(project1_id)
+
+ # NOTE(dstanek): If a cyclic refence is detected the code bails
+ # and returns None instead of falling into the infinite
+ # recursion trap.
+ self.assertIsNone(subtree)
+
+ def test_list_projects_in_subtree_invalid_project_id(self):
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.list_projects_in_subtree,
+ None)
+
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.list_projects_in_subtree,
+ uuid.uuid4().hex)
+
def test_list_project_parents(self):
projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3)
project1 = projects_hierarchy[0]
@@ -2218,7 +2482,8 @@ class IdentityTests(object):
'domain_id': DEFAULT_DOMAIN_ID,
'enabled': True,
'name': uuid.uuid4().hex,
- 'parent_id': project2['id']}
+ 'parent_id': project2['id'],
+ 'is_domain': False}
self.resource_api.create_project(project4['id'], project4)
parents1 = self.resource_api.list_project_parents(project3['id'])
@@ -2232,6 +2497,15 @@ class IdentityTests(object):
parents = self.resource_api.list_project_parents(project1['id'])
self.assertEqual(0, len(parents))
+ def test_list_project_parents_invalid_project_id(self):
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.list_project_parents,
+ None)
+
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.list_project_parents,
+ uuid.uuid4().hex)
+
def test_delete_project_with_role_assignments(self):
tenant = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID}
@@ -2812,29 +3086,36 @@ class IdentityTests(object):
'description': '',
'domain_id': DEFAULT_DOMAIN_ID,
'enabled': True,
- 'parent_id': 'fake'}
+ 'parent_id': 'fake',
+ 'is_domain': False}
self.assertRaises(exception.ProjectNotFound,
self.resource_api.create_project,
project['id'],
project)
- def test_create_leaf_project_with_invalid_domain(self):
+ @tests.skip_if_no_multiple_domains_support
+ def test_create_leaf_project_with_different_domain(self):
root_project = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': '',
'domain_id': DEFAULT_DOMAIN_ID,
'enabled': True,
- 'parent_id': None}
+ 'parent_id': None,
+ 'is_domain': False}
self.resource_api.create_project(root_project['id'], root_project)
+ domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+ 'enabled': True}
+ self.resource_api.create_domain(domain['id'], domain)
leaf_project = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': '',
- 'domain_id': 'fake',
+ 'domain_id': domain['id'],
'enabled': True,
- 'parent_id': root_project['id']}
+ 'parent_id': root_project['id'],
+ 'is_domain': False}
- self.assertRaises(exception.ForbiddenAction,
+ self.assertRaises(exception.ValidationError,
self.resource_api.create_project,
leaf_project['id'],
leaf_project)
@@ -2883,17 +3164,19 @@ class IdentityTests(object):
'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
'enabled': False,
- 'parent_id': None}
+ 'parent_id': None,
+ 'is_domain': False}
self.resource_api.create_project(project1['id'], project1)
project2 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
- 'parent_id': project1['id']}
+ 'parent_id': project1['id'],
+ 'is_domain': False}
# It's not possible to create a project under a disabled one in the
# hierarchy
- self.assertRaises(exception.ForbiddenAction,
+ self.assertRaises(exception.ValidationError,
self.resource_api.create_project,
project2['id'],
project2)
@@ -2955,7 +3238,8 @@ class IdentityTests(object):
'id': project_id,
'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
- 'parent_id': leaf_project['id']}
+ 'parent_id': leaf_project['id'],
+ 'is_domain': False}
self.assertRaises(exception.ForbiddenAction,
self.resource_api.create_project,
project_id,
@@ -2967,7 +3251,8 @@ class IdentityTests(object):
'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
'enabled': True,
- 'parent_id': None}
+ 'parent_id': None,
+ 'is_domain': False}
self.resource_api.create_project(project['id'], project)
# Add a description attribute.
@@ -2983,7 +3268,8 @@ class IdentityTests(object):
'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
'enabled': True,
- 'parent_id': None}
+ 'parent_id': None,
+ 'is_domain': False}
self.resource_api.create_project(project['id'], project)
# Add a description attribute.
@@ -3427,8 +3713,7 @@ class IdentityTests(object):
def get_member_assignments():
assignments = self.assignment_api.list_role_assignments()
- return filter(lambda x: x['role_id'] == MEMBER_ROLE_ID,
- assignments)
+ return [x for x in assignments if x['role_id'] == MEMBER_ROLE_ID]
orig_member_assignments = get_member_assignments()
@@ -3662,16 +3947,16 @@ class IdentityTests(object):
domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.resource_api.create_domain(domain2['id'], domain2)
project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
- 'domain_id': domain1['id']}
+ 'domain_id': domain1['id'], 'is_domain': False}
project1 = self.resource_api.create_project(project1['id'], project1)
project2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
- 'domain_id': domain1['id']}
+ 'domain_id': domain1['id'], 'is_domain': False}
project2 = self.resource_api.create_project(project2['id'], project2)
project3 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
- 'domain_id': domain1['id']}
+ 'domain_id': domain1['id'], 'is_domain': False}
project3 = self.resource_api.create_project(project3['id'], project3)
project4 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
- 'domain_id': domain2['id']}
+ 'domain_id': domain2['id'], 'is_domain': False}
project4 = self.resource_api.create_project(project4['id'], project4)
group_list = []
role_list = []
@@ -4291,7 +4576,9 @@ class TrustTests(object):
trust_data = self.trust_api.get_trust(trust_id)
self.assertEqual(new_id, trust_data['id'])
self.trust_api.delete_trust(trust_id)
- self.assertIsNone(self.trust_api.get_trust(trust_id))
+ self.assertRaises(exception.TrustNotFound,
+ self.trust_api.get_trust,
+ trust_id)
def test_delete_trust_not_found(self):
trust_id = uuid.uuid4().hex
@@ -4314,7 +4601,9 @@ class TrustTests(object):
self.assertIsNotNone(trust_data)
self.assertIsNone(trust_data['deleted_at'])
self.trust_api.delete_trust(new_id)
- self.assertIsNone(self.trust_api.get_trust(new_id))
+ self.assertRaises(exception.TrustNotFound,
+ self.trust_api.get_trust,
+ new_id)
deleted_trust = self.trust_api.get_trust(trust_data['id'],
deleted=True)
self.assertEqual(trust_data['id'], deleted_trust['id'])
@@ -4389,7 +4678,9 @@ class TrustTests(object):
self.assertEqual(1, t['remaining_uses'])
self.trust_api.consume_use(trust_data['id'])
# This was the last use, the trust isn't available anymore
- self.assertIsNone(self.trust_api.get_trust(trust_data['id']))
+ self.assertRaises(exception.TrustNotFound,
+ self.trust_api.get_trust,
+ trust_data['id'])
class CatalogTests(object):
@@ -4907,7 +5198,6 @@ class CatalogTests(object):
endpoint = {
'id': uuid.uuid4().hex,
- 'region_id': None,
'service_id': service['id'],
'interface': 'public',
'url': uuid.uuid4().hex,
@@ -5007,6 +5297,29 @@ class CatalogTests(object):
return service_ref, enabled_endpoint_ref, disabled_endpoint_ref
+ def test_list_endpoints(self):
+ service = {
+ 'id': uuid.uuid4().hex,
+ 'type': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'description': uuid.uuid4().hex,
+ }
+ self.catalog_api.create_service(service['id'], service.copy())
+
+ expected_ids = set([uuid.uuid4().hex for _ in range(3)])
+ for endpoint_id in expected_ids:
+ endpoint = {
+ 'id': endpoint_id,
+ 'region_id': None,
+ 'service_id': service['id'],
+ 'interface': 'public',
+ 'url': uuid.uuid4().hex,
+ }
+ self.catalog_api.create_endpoint(endpoint['id'], endpoint.copy())
+
+ endpoints = self.catalog_api.list_endpoints()
+ self.assertEqual(expected_ids, set(e['id'] for e in endpoints))
+
def test_get_catalog_endpoint_disabled(self):
"""Get back only enabled endpoints when get the v2 catalog."""
@@ -5157,6 +5470,77 @@ class PolicyTests(object):
class InheritanceTests(object):
+ def _test_crud_inherited_and_direct_assignment(self, **kwargs):
+ """Tests inherited and direct assignments for the actor and target
+
+ Ensure it is possible to create both inherited and direct role
+ assignments for the same actor on the same target. The actor and the
+ target are specified in the kwargs as ('user_id' or 'group_id') and
+ ('project_id' or 'domain_id'), respectively.
+
+ """
+
+ # Create a new role to avoid assignments loaded from default fixtures
+ role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ role = self.role_api.create_role(role['id'], role)
+
+ # Define the common assigment entity
+ assignment_entity = {'role_id': role['id']}
+ assignment_entity.update(kwargs)
+
+ # Define assignments under test
+ direct_assignment_entity = assignment_entity.copy()
+ inherited_assignment_entity = assignment_entity.copy()
+ inherited_assignment_entity['inherited_to_projects'] = 'projects'
+
+ # Create direct assignment and check grants
+ self.assignment_api.create_grant(inherited_to_projects=False,
+ **assignment_entity)
+
+ grants = self.assignment_api.list_role_assignments_for_role(role['id'])
+ self.assertThat(grants, matchers.HasLength(1))
+ self.assertIn(direct_assignment_entity, grants)
+
+ # Now add inherited assignment and check grants
+ self.assignment_api.create_grant(inherited_to_projects=True,
+ **assignment_entity)
+
+ grants = self.assignment_api.list_role_assignments_for_role(role['id'])
+ self.assertThat(grants, matchers.HasLength(2))
+ self.assertIn(direct_assignment_entity, grants)
+ self.assertIn(inherited_assignment_entity, grants)
+
+ # Delete both and check grants
+ self.assignment_api.delete_grant(inherited_to_projects=False,
+ **assignment_entity)
+ self.assignment_api.delete_grant(inherited_to_projects=True,
+ **assignment_entity)
+
+ grants = self.assignment_api.list_role_assignments_for_role(role['id'])
+ self.assertEqual([], grants)
+
+ def test_crud_inherited_and_direct_assignment_for_user_on_domain(self):
+ self._test_crud_inherited_and_direct_assignment(
+ user_id=self.user_foo['id'], domain_id=DEFAULT_DOMAIN_ID)
+
+ def test_crud_inherited_and_direct_assignment_for_group_on_domain(self):
+ group = {'name': uuid.uuid4().hex, 'domain_id': DEFAULT_DOMAIN_ID}
+ group = self.identity_api.create_group(group)
+
+ self._test_crud_inherited_and_direct_assignment(
+ group_id=group['id'], domain_id=DEFAULT_DOMAIN_ID)
+
+ def test_crud_inherited_and_direct_assignment_for_user_on_project(self):
+ self._test_crud_inherited_and_direct_assignment(
+ user_id=self.user_foo['id'], project_id=self.tenant_baz['id'])
+
+ def test_crud_inherited_and_direct_assignment_for_group_on_project(self):
+ group = {'name': uuid.uuid4().hex, 'domain_id': DEFAULT_DOMAIN_ID}
+ group = self.identity_api.create_group(group)
+
+ self._test_crud_inherited_and_direct_assignment(
+ group_id=group['id'], project_id=self.tenant_baz['id'])
+
def test_inherited_role_grants_for_user(self):
"""Test inherited user roles.
@@ -5375,14 +5759,16 @@ class InheritanceTests(object):
'domain_id': DEFAULT_DOMAIN_ID,
'enabled': True,
'name': uuid.uuid4().hex,
- 'parent_id': None}
+ 'parent_id': None,
+ 'is_domain': False}
self.resource_api.create_project(root_project['id'], root_project)
leaf_project = {'id': uuid.uuid4().hex,
'description': '',
'domain_id': DEFAULT_DOMAIN_ID,
'enabled': True,
'name': uuid.uuid4().hex,
- 'parent_id': root_project['id']}
+ 'parent_id': root_project['id'],
+ 'is_domain': False}
self.resource_api.create_project(leaf_project['id'], leaf_project)
user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
@@ -5496,14 +5882,16 @@ class InheritanceTests(object):
'domain_id': DEFAULT_DOMAIN_ID,
'enabled': True,
'name': uuid.uuid4().hex,
- 'parent_id': None}
+ 'parent_id': None,
+ 'is_domain': False}
self.resource_api.create_project(root_project['id'], root_project)
leaf_project = {'id': uuid.uuid4().hex,
'description': '',
'domain_id': DEFAULT_DOMAIN_ID,
'enabled': True,
'name': uuid.uuid4().hex,
- 'parent_id': root_project['id']}
+ 'parent_id': root_project['id'],
+ 'is_domain': False}
self.resource_api.create_project(leaf_project['id'], leaf_project)
user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
@@ -5663,6 +6051,65 @@ class FilterTests(filtering.FilterTests):
self._delete_test_data('user', user_list)
self._delete_test_data('group', group_list)
+ def _get_user_name_field_size(self):
+ """Return the size of the user name field for the backend.
+
+ Subclasses can override this method to indicate that the user name
+ field is limited in length. The user name is the field used in the test
+ that validates that a filter value works even if it's longer than a
+ field.
+
+ If the backend doesn't limit the value length then return None.
+
+ """
+ return None
+
+ def test_filter_value_wider_than_field(self):
+ # If a filter value is given that's larger than the field in the
+ # backend then no values are returned.
+
+ user_name_field_size = self._get_user_name_field_size()
+
+ if user_name_field_size is None:
+ # The backend doesn't limit the size of the user name, so pass this
+ # test.
+ return
+
+ # Create some users just to make sure would return something if the
+ # filter was ignored.
+ self._create_test_data('user', 2)
+
+ hints = driver_hints.Hints()
+ value = 'A' * (user_name_field_size + 1)
+ hints.add_filter('name', value)
+ users = self.identity_api.list_users(hints=hints)
+ self.assertEqual([], users)
+
+ def test_list_users_in_group_filtered(self):
+ number_of_users = 10
+ user_name_data = {
+ 1: 'Arthur Conan Doyle',
+ 3: 'Arthur Rimbaud',
+ 9: 'Arthur Schopenhauer',
+ }
+ user_list = self._create_test_data(
+ 'user', number_of_users,
+ domain_id=DEFAULT_DOMAIN_ID, name_dict=user_name_data)
+ group = self._create_one_entity('group',
+ DEFAULT_DOMAIN_ID, 'Great Writers')
+ for i in range(7):
+ self.identity_api.add_user_to_group(user_list[i]['id'],
+ group['id'])
+
+ hints = driver_hints.Hints()
+ hints.add_filter('name', 'Arthur', comparator='startswith')
+ users = self.identity_api.list_users_in_group(group['id'], hints=hints)
+ self.assertThat(len(users), matchers.Equals(2))
+ self.assertIn(user_list[1]['id'], [users[0]['id'], users[1]['id']])
+ self.assertIn(user_list[3]['id'], [users[0]['id'], users[1]['id']])
+ self._delete_test_data('user', user_list)
+ self._delete_entity('group')(group['id'])
+
class LimitTests(filtering.FilterTests):
ENTITIES = ['user', 'group', 'project']
diff --git a/keystone-moon/keystone/tests/unit/test_backend_endpoint_policy.py b/keystone-moon/keystone/tests/unit/test_backend_endpoint_policy.py
index cc41d977..6c2181aa 100644
--- a/keystone-moon/keystone/tests/unit/test_backend_endpoint_policy.py
+++ b/keystone-moon/keystone/tests/unit/test_backend_endpoint_policy.py
@@ -14,6 +14,7 @@
import uuid
+from six.moves import range
from testtools import matchers
from keystone import exception
diff --git a/keystone-moon/keystone/tests/unit/test_backend_endpoint_policy_sql.py b/keystone-moon/keystone/tests/unit/test_backend_endpoint_policy_sql.py
index dab02859..134a03f0 100644
--- a/keystone-moon/keystone/tests/unit/test_backend_endpoint_policy_sql.py
+++ b/keystone-moon/keystone/tests/unit/test_backend_endpoint_policy_sql.py
@@ -21,7 +21,8 @@ class SqlPolicyAssociationTable(test_backend_sql.SqlModels):
"""Set of tests for checking SQL Policy Association Mapping."""
def test_policy_association_mapping(self):
- cols = (('policy_id', sql.String, 64),
+ cols = (('id', sql.String, 64),
+ ('policy_id', sql.String, 64),
('endpoint_id', sql.String, 64),
('service_id', sql.String, 64),
('region_id', sql.String, 64))
diff --git a/keystone-moon/keystone/tests/unit/test_backend_federation_sql.py b/keystone-moon/keystone/tests/unit/test_backend_federation_sql.py
index 48ebad6c..995c564d 100644
--- a/keystone-moon/keystone/tests/unit/test_backend_federation_sql.py
+++ b/keystone-moon/keystone/tests/unit/test_backend_federation_sql.py
@@ -21,11 +21,15 @@ class SqlFederation(test_backend_sql.SqlModels):
def test_identity_provider(self):
cols = (('id', sql.String, 64),
- ('remote_id', sql.String, 256),
('enabled', sql.Boolean, None),
('description', sql.Text, None))
self.assertExpectedSchema('identity_provider', cols)
+ def test_idp_remote_ids(self):
+ cols = (('idp_id', sql.String, 64),
+ ('remote_id', sql.String, 255))
+ self.assertExpectedSchema('idp_remote_ids', cols)
+
def test_federated_protocol(self):
cols = (('id', sql.String, 64),
('idp_id', sql.String, 64),
@@ -42,5 +46,6 @@ class SqlFederation(test_backend_sql.SqlModels):
('id', sql.String, 64),
('enabled', sql.Boolean, None),
('description', sql.Text, None),
+ ('relay_state_prefix', sql.String, 256),
('sp_url', sql.String, 256))
self.assertExpectedSchema('service_provider', cols)
diff --git a/keystone-moon/keystone/tests/unit/test_backend_kvs.py b/keystone-moon/keystone/tests/unit/test_backend_kvs.py
index c0997ad9..a22faa59 100644
--- a/keystone-moon/keystone/tests/unit/test_backend_kvs.py
+++ b/keystone-moon/keystone/tests/unit/test_backend_kvs.py
@@ -18,6 +18,7 @@ from oslo_config import cfg
from oslo_utils import timeutils
import six
+from keystone.common import utils
from keystone import exception
from keystone.tests import unit as tests
from keystone.tests.unit import test_backend
@@ -67,13 +68,13 @@ class KvsToken(tests.TestCase, test_backend.TokenTests):
valid_token_ref = token_persistence.get_token(valid_token_id)
expired_token_ref = token_persistence.get_token(expired_token_id)
expected_user_token_list = [
- (valid_token_id, timeutils.isotime(valid_token_ref['expires'],
- subsecond=True)),
- (expired_token_id, timeutils.isotime(expired_token_ref['expires'],
- subsecond=True))]
+ (valid_token_id, utils.isotime(valid_token_ref['expires'],
+ subsecond=True)),
+ (expired_token_id, utils.isotime(expired_token_ref['expires'],
+ subsecond=True))]
self.assertEqual(expected_user_token_list, user_token_list)
new_expired_data = (expired_token_id,
- timeutils.isotime(
+ utils.isotime(
(timeutils.utcnow() - expire_delta),
subsecond=True))
self._update_user_token_index_direct(user_key, expired_token_id,
@@ -82,10 +83,10 @@ class KvsToken(tests.TestCase, test_backend.TokenTests):
user_id=user_id)
valid_token_ref_2 = token_persistence.get_token(valid_token_id_2)
expected_user_token_list = [
- (valid_token_id, timeutils.isotime(valid_token_ref['expires'],
- subsecond=True)),
- (valid_token_id_2, timeutils.isotime(valid_token_ref_2['expires'],
- subsecond=True))]
+ (valid_token_id, utils.isotime(valid_token_ref['expires'],
+ subsecond=True)),
+ (valid_token_id_2, utils.isotime(valid_token_ref_2['expires'],
+ subsecond=True))]
user_token_list = token_persistence.driver._store.get(user_key)
self.assertEqual(expected_user_token_list, user_token_list)
@@ -94,10 +95,10 @@ class KvsToken(tests.TestCase, test_backend.TokenTests):
new_token_id, data = self.create_token_sample_data(user_id=user_id)
new_token_ref = token_persistence.get_token(new_token_id)
expected_user_token_list = [
- (valid_token_id, timeutils.isotime(valid_token_ref['expires'],
- subsecond=True)),
- (new_token_id, timeutils.isotime(new_token_ref['expires'],
- subsecond=True))]
+ (valid_token_id, utils.isotime(valid_token_ref['expires'],
+ subsecond=True)),
+ (new_token_id, utils.isotime(new_token_ref['expires'],
+ subsecond=True))]
user_token_list = token_persistence.driver._store.get(user_key)
self.assertEqual(expected_user_token_list, user_token_list)
@@ -110,9 +111,7 @@ class KvsCatalog(tests.TestCase, test_backend.CatalogTests):
def config_overrides(self):
super(KvsCatalog, self).config_overrides()
- self.config_fixture.config(
- group='catalog',
- driver='keystone.catalog.backends.kvs.Catalog')
+ self.config_fixture.config(group='catalog', driver='kvs')
def _load_fake_catalog(self):
self.catalog_foobar = self.catalog_api.driver._create_catalog(
@@ -167,6 +166,4 @@ class KvsTokenCacheInvalidation(tests.TestCase,
def config_overrides(self):
super(KvsTokenCacheInvalidation, self).config_overrides()
- self.config_fixture.config(
- group='token',
- driver='keystone.token.persistence.backends.kvs.Token')
+ self.config_fixture.config(group='token', driver='kvs')
diff --git a/keystone-moon/keystone/tests/unit/test_backend_ldap.py b/keystone-moon/keystone/tests/unit/test_backend_ldap.py
index 10119808..94fb82e7 100644
--- a/keystone-moon/keystone/tests/unit/test_backend_ldap.py
+++ b/keystone-moon/keystone/tests/unit/test_backend_ldap.py
@@ -20,27 +20,92 @@ import uuid
import ldap
import mock
from oslo_config import cfg
+import pkg_resources
+from six.moves import range
from testtools import matchers
from keystone.common import cache
from keystone.common import ldap as common_ldap
from keystone.common.ldap import core as common_ldap_core
-from keystone.common import sql
from keystone import exception
from keystone import identity
from keystone.identity.mapping_backends import mapping as map
from keystone import resource
from keystone.tests import unit as tests
from keystone.tests.unit import default_fixtures
-from keystone.tests.unit import fakeldap
from keystone.tests.unit import identity_mapping as mapping_sql
from keystone.tests.unit.ksfixtures import database
+from keystone.tests.unit.ksfixtures import ldapdb
from keystone.tests.unit import test_backend
CONF = cfg.CONF
+def _assert_backends(testcase, **kwargs):
+
+ def _get_backend_cls(testcase, subsystem):
+ observed_backend = getattr(testcase, subsystem + '_api').driver
+ return observed_backend.__class__
+
+ def _get_domain_specific_backend_cls(manager, domain):
+ observed_backend = manager.domain_configs.get_domain_driver(domain)
+ return observed_backend.__class__
+
+ def _get_entrypoint_cls(subsystem, name):
+ entrypoint = entrypoint_map['keystone.' + subsystem][name]
+ return entrypoint.resolve()
+
+ def _load_domain_specific_configs(manager):
+ if (not manager.domain_configs.configured and
+ CONF.identity.domain_specific_drivers_enabled):
+ manager.domain_configs.setup_domain_drivers(
+ manager.driver, manager.resource_api)
+
+ def _assert_equal(expected_cls, observed_cls, subsystem,
+ domain=None):
+ msg = ('subsystem %(subsystem)s expected %(expected_cls)r, '
+ 'but observed %(observed_cls)r')
+ if domain:
+ subsystem = '%s[domain=%s]' % (subsystem, domain)
+ assert expected_cls == observed_cls, msg % {
+ 'expected_cls': expected_cls,
+ 'observed_cls': observed_cls,
+ 'subsystem': subsystem,
+ }
+
+ env = pkg_resources.Environment()
+ keystone_dist = env['keystone'][0]
+ entrypoint_map = pkg_resources.get_entry_map(keystone_dist)
+
+ for subsystem, entrypoint_name in kwargs.items():
+ if isinstance(entrypoint_name, str):
+ observed_cls = _get_backend_cls(testcase, subsystem)
+ expected_cls = _get_entrypoint_cls(subsystem, entrypoint_name)
+ _assert_equal(expected_cls, observed_cls, subsystem)
+
+ elif isinstance(entrypoint_name, dict):
+ manager = getattr(testcase, subsystem + '_api')
+ _load_domain_specific_configs(manager)
+
+ for domain, entrypoint_name in entrypoint_name.items():
+ if domain is None:
+ observed_cls = _get_backend_cls(testcase, subsystem)
+ expected_cls = _get_entrypoint_cls(
+ subsystem, entrypoint_name)
+ _assert_equal(expected_cls, observed_cls, subsystem)
+ continue
+
+ observed_cls = _get_domain_specific_backend_cls(
+ manager, domain)
+ expected_cls = _get_entrypoint_cls(subsystem, entrypoint_name)
+ _assert_equal(expected_cls, observed_cls, subsystem, domain)
+
+ else:
+ raise ValueError('%r is not an expected value for entrypoint name'
+ % entrypoint_name)
+
+
def create_group_container(identity_api):
# Create the groups base entry (ou=Groups,cn=example,cn=com)
group_api = identity_api.driver.group
@@ -54,35 +119,22 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
def setUp(self):
super(BaseLDAPIdentity, self).setUp()
- self.clear_database()
+ self.ldapdb = self.useFixture(ldapdb.LDAPDatabase())
- common_ldap.register_handler('fake://', fakeldap.FakeLdap)
self.load_backends()
self.load_fixtures(default_fixtures)
- self.addCleanup(common_ldap_core._HANDLERS.clear)
-
def _get_domain_fixture(self):
"""Domains in LDAP are read-only, so just return the static one."""
return self.resource_api.get_domain(CONF.identity.default_domain_id)
- def clear_database(self):
- for shelf in fakeldap.FakeShelves:
- fakeldap.FakeShelves[shelf].clear()
-
- def reload_backends(self, domain_id):
- # Only one backend unless we are using separate domain backends
- self.load_backends()
-
def get_config(self, domain_id):
# Only one conf structure unless we are using separate domain backends
return CONF
def config_overrides(self):
super(BaseLDAPIdentity, self).config_overrides()
- self.config_fixture.config(
- group='identity',
- driver='keystone.identity.backends.ldap.Identity')
+ self.config_fixture.config(group='identity', driver='ldap')
def config_files(self):
config_files = super(BaseLDAPIdentity, self).config_files()
@@ -127,11 +179,11 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
user['id'])
def test_configurable_forbidden_user_actions(self):
- conf = self.get_config(CONF.identity.default_domain_id)
- conf.ldap.user_allow_create = False
- conf.ldap.user_allow_update = False
- conf.ldap.user_allow_delete = False
- self.reload_backends(CONF.identity.default_domain_id)
+ driver = self.identity_api._select_identity_driver(
+ CONF.identity.default_domain_id)
+ driver.user.allow_create = False
+ driver.user.allow_update = False
+ driver.user.allow_delete = False
user = {'name': u'fäké1',
'password': u'fäképass1',
@@ -152,9 +204,9 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
self.user_foo['id'])
def test_configurable_forbidden_create_existing_user(self):
- conf = self.get_config(CONF.identity.default_domain_id)
- conf.ldap.user_allow_create = False
- self.reload_backends(CONF.identity.default_domain_id)
+ driver = self.identity_api._select_identity_driver(
+ CONF.identity.default_domain_id)
+ driver.user.allow_create = False
self.assertRaises(exception.ForbiddenAction,
self.identity_api.create_user,
@@ -165,9 +217,9 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
self.user_foo.pop('password')
self.assertDictEqual(user_ref, self.user_foo)
- conf = self.get_config(user_ref['domain_id'])
- conf.ldap.user_filter = '(CN=DOES_NOT_MATCH)'
- self.reload_backends(user_ref['domain_id'])
+ driver = self.identity_api._select_identity_driver(
+ user_ref['domain_id'])
+ driver.user.ldap_filter = '(CN=DOES_NOT_MATCH)'
# invalidate the cache if the result is cached.
self.identity_api.get_user.invalidate(self.identity_api,
self.user_foo['id'])
@@ -468,9 +520,16 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
after_assignments = len(self.assignment_api.list_role_assignments())
self.assertEqual(existing_assignments + 2, after_assignments)
+ def test_list_role_assignments_filtered_by_role(self):
+ # Domain roles are not supported by the LDAP Assignment backend
+ self.assertRaises(
+ exception.NotImplemented,
+ super(BaseLDAPIdentity, self).
+ test_list_role_assignments_filtered_by_role)
+
def test_list_role_assignments_dumb_member(self):
self.config_fixture.config(group='ldap', use_dumb_member=True)
- self.clear_database()
+ self.ldapdb.clear()
self.load_backends()
self.load_fixtures(default_fixtures)
@@ -495,7 +554,7 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
def test_list_user_ids_for_project_dumb_member(self):
self.config_fixture.config(group='ldap', use_dumb_member=True)
- self.clear_database()
+ self.ldapdb.clear()
self.load_backends()
self.load_fixtures(default_fixtures)
@@ -569,7 +628,7 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
def test_list_group_members_dumb_member(self):
self.config_fixture.config(group='ldap', use_dumb_member=True)
- self.clear_database()
+ self.ldapdb.clear()
self.load_backends()
self.load_fixtures(default_fixtures)
@@ -686,11 +745,10 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
def test_create_user_none_mapping(self):
# When create a user where an attribute maps to None, the entry is
# created without that attribute and it doesn't fail with a TypeError.
- conf = self.get_config(CONF.identity.default_domain_id)
- conf.ldap.user_attribute_ignore = ['enabled', 'email',
- 'tenants', 'tenantId']
- self.reload_backends(CONF.identity.default_domain_id)
-
+ driver = self.identity_api._select_identity_driver(
+ CONF.identity.default_domain_id)
+ driver.user.attribute_ignore = ['enabled', 'email',
+ 'tenants', 'tenantId']
user = {'name': u'fäké1',
'password': u'fäképass1',
'domain_id': CONF.identity.default_domain_id,
@@ -723,10 +781,10 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
# Ensure that an attribute that maps to None that is not explicitly
# ignored in configuration is implicitly ignored without triggering
# an error.
- conf = self.get_config(CONF.identity.default_domain_id)
- conf.ldap.user_attribute_ignore = ['enabled', 'email',
- 'tenants', 'tenantId']
- self.reload_backends(CONF.identity.default_domain_id)
+ driver = self.identity_api._select_identity_driver(
+ CONF.identity.default_domain_id)
+ driver.user.attribute_ignore = ['enabled', 'email',
+ 'tenants', 'tenantId']
user = {'name': u'fäké1',
'password': u'fäképass1',
@@ -930,6 +988,10 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
# credentials) that require a database.
self.useFixture(database.Database())
super(LDAPIdentity, self).setUp()
+ _assert_backends(self,
+ assignment='ldap',
+ identity='ldap',
+ resource='ldap')
def load_fixtures(self, fixtures):
# Override super impl since need to create group container.
@@ -937,7 +999,9 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
super(LDAPIdentity, self).load_fixtures(fixtures)
def test_configurable_allowed_project_actions(self):
- tenant = {'id': u'fäké1', 'name': u'fäké1', 'enabled': True}
+ domain = self._get_domain_fixture()
+ tenant = {'id': u'fäké1', 'name': u'fäké1', 'enabled': True,
+ 'domain_id': domain['id']}
self.resource_api.create_project(u'fäké1', tenant)
tenant_ref = self.resource_api.get_project(u'fäké1')
self.assertEqual(u'fäké1', tenant_ref['id'])
@@ -990,7 +1054,8 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
project_allow_update=False, project_allow_delete=False)
self.load_backends()
- tenant = {'id': u'fäké1', 'name': u'fäké1'}
+ domain = self._get_domain_fixture()
+ tenant = {'id': u'fäké1', 'name': u'fäké1', 'domain_id': domain['id']}
self.assertRaises(exception.ForbiddenAction,
self.resource_api.create_project,
u'fäké1',
@@ -1029,7 +1094,7 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
def test_dumb_member(self):
self.config_fixture.config(group='ldap', use_dumb_member=True)
- self.clear_database()
+ self.ldapdb.clear()
self.load_backends()
self.load_fixtures(default_fixtures)
dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member)
@@ -1042,7 +1107,7 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
group='ldap', project_name_attribute='ou',
project_desc_attribute='description',
project_enabled_attribute='enabled')
- self.clear_database()
+ self.ldapdb.clear()
self.load_backends()
self.load_fixtures(default_fixtures)
# NOTE(morganfainberg): CONF.ldap.project_name_attribute,
@@ -1087,7 +1152,7 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
self.config_fixture.config(
group='ldap',
project_attribute_ignore=['name', 'description', 'enabled'])
- self.clear_database()
+ self.ldapdb.clear()
self.load_backends()
self.load_fixtures(default_fixtures)
# NOTE(morganfainberg): CONF.ldap.project_attribute_ignore will not be
@@ -1107,7 +1172,7 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
def test_user_enable_attribute_mask(self):
self.config_fixture.config(group='ldap', user_enabled_mask=2,
user_enabled_default='512')
- self.clear_database()
+ self.ldapdb.clear()
self.load_backends()
self.load_fixtures(default_fixtures)
@@ -1155,7 +1220,7 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
def test_user_enabled_invert(self):
self.config_fixture.config(group='ldap', user_enabled_invert=True,
user_enabled_default=False)
- self.clear_database()
+ self.ldapdb.clear()
self.load_backends()
self.load_fixtures(default_fixtures)
@@ -1426,6 +1491,26 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
new_user = [u for u in res if u['id'] == user['id']][0]
self.assertThat(new_user['description'], matchers.Equals(description))
+ def test_user_with_missing_id(self):
+ # create a user that doesn't have the id attribute
+ ldap_ = self.identity_api.driver.user.get_connection()
+ # `sn` is used for the attribute in the DN because it's allowed by
+ # the entry's objectclasses so that this test could conceivably run in
+ # the live tests.
+ ldap_id_field = 'sn'
+ ldap_id_value = uuid.uuid4().hex
+ dn = '%s=%s,ou=Users,cn=example,cn=com' % (ldap_id_field,
+ ldap_id_value)
+ modlist = [('objectClass', ['person', 'inetOrgPerson']),
+ (ldap_id_field, [ldap_id_value]),
+ ('mail', ['email@example.com']),
+ ('userPassword', [uuid.uuid4().hex])]
+ ldap_.add_s(dn, modlist)
+
+ # make sure the user doesn't break other users
+ users = self.identity_api.driver.user.get_all()
+ self.assertThat(users, matchers.HasLength(len(default_fixtures.USERS)))
+
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_user_mixed_case_attribute(self, mock_ldap_get):
# Mock the search results to return attribute names
@@ -1531,7 +1616,8 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
'domain_id': CONF.identity.default_domain_id,
'description': uuid.uuid4().hex,
'enabled': True,
- 'parent_id': None}
+ 'parent_id': None,
+ 'is_domain': False}
self.resource_api.create_project(project['id'], project)
project_ref = self.resource_api.get_project(project['id'])
@@ -1609,7 +1695,8 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
'description': '',
'domain_id': domain['id'],
'enabled': True,
- 'parent_id': None}
+ 'parent_id': None,
+ 'is_domain': False}
self.resource_api.create_project(project1['id'], project1)
# Creating project2 under project1. LDAP will not allow
@@ -1619,7 +1706,8 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
'description': '',
'domain_id': domain['id'],
'enabled': True,
- 'parent_id': project1['id']}
+ 'parent_id': project1['id'],
+ 'is_domain': False}
self.assertRaises(exception.InvalidParentProject,
self.resource_api.create_project,
@@ -1633,6 +1721,58 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
# Returning projects to be used across the tests
return [project1, project2]
+ def _assert_create_is_domain_project_not_allowed(self):
+ """Tests that we can't create more than one project acting as domain.
+
+ This method will be used at any test that require the creation of a
+ project that act as a domain. LDAP does not support multiple domains
+ and the only domain it has (default) is immutable.
+ """
+ domain = self._get_domain_fixture()
+ project = {'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'description': '',
+ 'domain_id': domain['id'],
+ 'enabled': True,
+ 'parent_id': None,
+ 'is_domain': True}
+
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.create_project,
+ project['id'], project)
+
+ def test_update_is_domain_field(self):
+ domain = self._get_domain_fixture()
+ project = {'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'description': '',
+ 'domain_id': domain['id'],
+ 'enabled': True,
+ 'parent_id': None,
+ 'is_domain': False}
+ self.resource_api.create_project(project['id'], project)
+
+ # Try to update the is_domain field to True
+ project['is_domain'] = True
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.update_project,
+ project['id'], project)
+
+ def test_delete_is_domain_project(self):
+ self._assert_create_is_domain_project_not_allowed()
+
+ def test_create_domain_under_regular_project_hierarchy_fails(self):
+ self._assert_create_hierarchy_not_allowed()
+
+ def test_create_not_is_domain_project_under_is_domain_hierarchy(self):
+ self._assert_create_hierarchy_not_allowed()
+
+ def test_create_is_domain_project(self):
+ self._assert_create_is_domain_project_not_allowed()
+
+ def test_create_project_with_parent_id_and_without_domain_id(self):
+ self._assert_create_hierarchy_not_allowed()
+
def test_check_leaf_projects(self):
projects = self._assert_create_hierarchy_not_allowed()
for project in projects:
@@ -1642,13 +1782,17 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
projects = self._assert_create_hierarchy_not_allowed()
for project in projects:
subtree_list = self.resource_api.list_projects_in_subtree(
- project)
+ project['id'])
self.assertEqual(0, len(subtree_list))
+ def test_list_projects_in_subtree_with_circular_reference(self):
+ self._assert_create_hierarchy_not_allowed()
+
def test_list_project_parents(self):
projects = self._assert_create_hierarchy_not_allowed()
for project in projects:
- parents_list = self.resource_api.list_project_parents(project)
+ parents_list = self.resource_api.list_project_parents(
+ project['id'])
self.assertEqual(0, len(parents_list))
def test_hierarchical_projects_crud(self):
@@ -1826,9 +1970,9 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
self.assertEqual(set(expected_group_ids), group_ids)
def test_user_id_attribute_in_create(self):
- conf = self.get_config(CONF.identity.default_domain_id)
- conf.ldap.user_id_attribute = 'mail'
- self.reload_backends(CONF.identity.default_domain_id)
+ driver = self.identity_api._select_identity_driver(
+ CONF.identity.default_domain_id)
+ driver.user.id_attr = 'mail'
user = {'name': u'fäké1',
'password': u'fäképass1',
@@ -1840,9 +1984,9 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
self.assertEqual(user_ref['id'], user_ref['email'])
def test_user_id_attribute_map(self):
- conf = self.get_config(CONF.identity.default_domain_id)
- conf.ldap.user_id_attribute = 'mail'
- self.reload_backends(CONF.identity.default_domain_id)
+ driver = self.identity_api._select_identity_driver(
+ CONF.identity.default_domain_id)
+ driver.user.id_attr = 'mail'
user_ref = self.identity_api.get_user(self.user_foo['email'])
# the user_id_attribute map should be honored, which means
@@ -1851,9 +1995,9 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_get_id_from_dn_for_multivalued_attribute_id(self, mock_ldap_get):
- conf = self.get_config(CONF.identity.default_domain_id)
- conf.ldap.user_id_attribute = 'mail'
- self.reload_backends(CONF.identity.default_domain_id)
+ driver = self.identity_api._select_identity_driver(
+ CONF.identity.default_domain_id)
+ driver.user.id_attr = 'mail'
# make 'email' multivalued so we can test the error condition
email1 = uuid.uuid4().hex
@@ -1888,10 +2032,10 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_user_id_not_in_dn(self, mock_ldap_get):
- conf = self.get_config(CONF.identity.default_domain_id)
- conf.ldap.user_id_attribute = 'uid'
- conf.ldap.user_name_attribute = 'cn'
- self.reload_backends(CONF.identity.default_domain_id)
+ driver = self.identity_api._select_identity_driver(
+ CONF.identity.default_domain_id)
+ driver.user.id_attr = 'uid'
+ driver.user.attribute_mapping['name'] = 'cn'
mock_ldap_get.return_value = (
'foo=bar,dc=example,dc=com',
@@ -1908,10 +2052,10 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_user_name_in_dn(self, mock_ldap_get):
- conf = self.get_config(CONF.identity.default_domain_id)
- conf.ldap.user_id_attribute = 'sAMAccountName'
- conf.ldap.user_name_attribute = 'cn'
- self.reload_backends(CONF.identity.default_domain_id)
+ driver = self.identity_api._select_identity_driver(
+ CONF.identity.default_domain_id)
+ driver.user.id_attr = 'SAMAccountName'
+ driver.user.attribute_mapping['name'] = 'cn'
mock_ldap_get.return_value = (
'cn=Foo Bar,dc=example,dc=com',
@@ -1929,12 +2073,16 @@ class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
class LDAPIdentityEnabledEmulation(LDAPIdentity):
def setUp(self):
super(LDAPIdentityEnabledEmulation, self).setUp()
- self.clear_database()
+ self.ldapdb.clear()
self.load_backends()
self.load_fixtures(default_fixtures)
for obj in [self.tenant_bar, self.tenant_baz, self.user_foo,
self.user_two, self.user_badguy]:
obj.setdefault('enabled', True)
+ _assert_backends(self,
+ assignment='ldap',
+ identity='ldap',
+ resource='ldap')
def load_fixtures(self, fixtures):
# Override super impl since need to create group container.
@@ -1961,7 +2109,8 @@ class LDAPIdentityEnabledEmulation(LDAPIdentity):
'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'description': uuid.uuid4().hex,
- 'parent_id': None}
+ 'parent_id': None,
+ 'is_domain': False}
self.resource_api.create_project(project['id'], project)
project_ref = self.resource_api.get_project(project['id'])
@@ -2007,9 +2156,9 @@ class LDAPIdentityEnabledEmulation(LDAPIdentity):
user['id'])
def test_user_auth_emulated(self):
- self.config_fixture.config(group='ldap',
- user_enabled_emulation_dn='cn=test,dc=test')
- self.reload_backends(CONF.identity.default_domain_id)
+ driver = self.identity_api._select_identity_driver(
+ CONF.identity.default_domain_id)
+ driver.user.enabled_emulation_dn = 'cn=test,dc=test'
self.identity_api.authenticate(
context={},
user_id=self.user_foo['id'],
@@ -2022,7 +2171,7 @@ class LDAPIdentityEnabledEmulation(LDAPIdentity):
def test_user_enabled_invert(self):
self.config_fixture.config(group='ldap', user_enabled_invert=True,
user_enabled_default=False)
- self.clear_database()
+ self.ldapdb.clear()
self.load_backends()
self.load_fixtures(default_fixtures)
@@ -2110,32 +2259,26 @@ class LdapIdentitySqlAssignment(BaseLDAPIdentity, tests.SQLDriverOverrides,
return config_files
def setUp(self):
- self.useFixture(database.Database())
+ sqldb = self.useFixture(database.Database())
super(LdapIdentitySqlAssignment, self).setUp()
- self.clear_database()
+ self.ldapdb.clear()
self.load_backends()
cache.configure_cache_region(cache.REGION)
- self.engine = sql.get_engine()
- self.addCleanup(sql.cleanup)
-
- sql.ModelBase.metadata.create_all(bind=self.engine)
- self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
+ sqldb.recreate()
self.load_fixtures(default_fixtures)
# defaulted by the data load
self.user_foo['enabled'] = True
+ _assert_backends(self,
+ assignment='sql',
+ identity='ldap',
+ resource='sql')
def config_overrides(self):
super(LdapIdentitySqlAssignment, self).config_overrides()
- self.config_fixture.config(
- group='identity',
- driver='keystone.identity.backends.ldap.Identity')
- self.config_fixture.config(
- group='resource',
- driver='keystone.resource.backends.sql.Resource')
- self.config_fixture.config(
- group='assignment',
- driver='keystone.assignment.backends.sql.Assignment')
+ self.config_fixture.config(group='identity', driver='ldap')
+ self.config_fixture.config(group='resource', driver='sql')
+ self.config_fixture.config(group='assignment', driver='sql')
def test_domain_crud(self):
pass
@@ -2214,6 +2357,11 @@ class LdapIdentitySqlAssignment(BaseLDAPIdentity, tests.SQLDriverOverrides,
self.skipTest("Doesn't apply since LDAP configuration is ignored for "
"SQL assignment backend.")
+ def test_list_role_assignments_filtered_by_role(self):
+ # Domain roles are supported by the SQL Assignment backend
+ base = super(BaseLDAPIdentity, self)
+ base.test_list_role_assignments_filtered_by_role()
+
class LdapIdentitySqlAssignmentWithMapping(LdapIdentitySqlAssignment):
"""Class to test mapping of default LDAP backend.
@@ -2390,16 +2538,11 @@ class MultiLDAPandSQLIdentity(BaseLDAPIdentity, tests.SQLDriverOverrides,
"""
def setUp(self):
- self.useFixture(database.Database())
+ sqldb = self.useFixture(database.Database())
super(MultiLDAPandSQLIdentity, self).setUp()
self.load_backends()
-
- self.engine = sql.get_engine()
- self.addCleanup(sql.cleanup)
-
- sql.ModelBase.metadata.create_all(bind=self.engine)
- self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
+ sqldb.recreate()
self.domain_count = 5
self.domain_specific_count = 3
@@ -2410,23 +2553,29 @@ class MultiLDAPandSQLIdentity(BaseLDAPIdentity, tests.SQLDriverOverrides,
# for separate backends per domain.
self.enable_multi_domain()
- self.clear_database()
+ self.ldapdb.clear()
self.load_fixtures(default_fixtures)
self.create_users_across_domains()
+ self.assert_backends()
+
+ def assert_backends(self):
+ _assert_backends(self,
+ assignment='sql',
+ identity={
+ None: 'sql',
+ self.domains['domain_default']['id']: 'ldap',
+ self.domains['domain1']['id']: 'ldap',
+ self.domains['domain2']['id']: 'ldap',
+ },
+ resource='sql')
def config_overrides(self):
super(MultiLDAPandSQLIdentity, self).config_overrides()
# Make sure identity and assignment are actually SQL drivers,
# BaseLDAPIdentity sets these options to use LDAP.
- self.config_fixture.config(
- group='identity',
- driver='keystone.identity.backends.sql.Identity')
- self.config_fixture.config(
- group='resource',
- driver='keystone.resource.backends.sql.Resource')
- self.config_fixture.config(
- group='assignment',
- driver='keystone.assignment.backends.sql.Assignment')
+ self.config_fixture.config(group='identity', driver='sql')
+ self.config_fixture.config(group='resource', driver='sql')
+ self.config_fixture.config(group='assignment', driver='sql')
def _setup_initial_users(self):
# Create some identity entities BEFORE we switch to multi-backend, so
@@ -2453,11 +2602,6 @@ class MultiLDAPandSQLIdentity(BaseLDAPIdentity, tests.SQLDriverOverrides,
self.config_fixture.config(group='identity_mapping',
backward_compatible_ids=False)
- def reload_backends(self, domain_id):
- # Just reload the driver for this domain - which will pickup
- # any updated cfg
- self.identity_api.domain_configs.reload_domain_driver(domain_id)
-
def get_config(self, domain_id):
# Get the config for this domain, will return CONF
# if no specific config defined for this domain
@@ -2619,7 +2763,8 @@ class MultiLDAPandSQLIdentity(BaseLDAPIdentity, tests.SQLDriverOverrides,
'domain_id': domain['id'],
'description': uuid.uuid4().hex,
'parent_id': None,
- 'enabled': True}
+ 'enabled': True,
+ 'is_domain': False}
self.resource_api.create_domain(domain['id'], domain)
self.resource_api.create_project(project['id'], project)
project_ref = self.resource_api.get_project(project['id'])
@@ -2653,6 +2798,11 @@ class MultiLDAPandSQLIdentity(BaseLDAPIdentity, tests.SQLDriverOverrides,
self.skipTest("Doesn't apply since LDAP configuration is ignored for "
"SQL assignment backend.")
+ def test_list_role_assignments_filtered_by_role(self):
+ # Domain roles are supported by the SQL Assignment backend
+ base = super(BaseLDAPIdentity, self)
+ base.test_list_role_assignments_filtered_by_role()
+
class MultiLDAPandSQLIdentityDomainConfigsInSQL(MultiLDAPandSQLIdentity):
"""Class to test the use of domain configs stored in the database.
@@ -2662,6 +2812,18 @@ class MultiLDAPandSQLIdentityDomainConfigsInSQL(MultiLDAPandSQLIdentity):
database.
"""
+
+ def assert_backends(self):
+ _assert_backends(self,
+ assignment='sql',
+ identity={
+ None: 'sql',
+ self.domains['domain_default']['id']: 'ldap',
+ self.domains['domain1']['id']: 'ldap',
+ self.domains['domain2']['id']: 'ldap',
+ },
+ resource='sql')
+
def enable_multi_domain(self):
# The values below are the same as in the domain_configs_multi_ldap
# cdirectory of test config_files.
@@ -2670,14 +2832,14 @@ class MultiLDAPandSQLIdentityDomainConfigsInSQL(MultiLDAPandSQLIdentity):
'user': 'cn=Admin',
'password': 'password',
'suffix': 'cn=example,cn=com'},
- 'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
+ 'identity': {'driver': 'ldap'}
}
domain1_config = {
'ldap': {'url': 'fake://memory1',
'user': 'cn=Admin',
'password': 'password',
'suffix': 'cn=example,cn=com'},
- 'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
+ 'identity': {'driver': 'ldap'}
}
domain2_config = {
'ldap': {'url': 'fake://memory',
@@ -2686,7 +2848,7 @@ class MultiLDAPandSQLIdentityDomainConfigsInSQL(MultiLDAPandSQLIdentity):
'suffix': 'cn=myroot,cn=com',
'group_tree_dn': 'ou=UserGroups,dc=myroot,dc=org',
'user_tree_dn': 'ou=Users,dc=myroot,dc=org'},
- 'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
+ 'identity': {'driver': 'ldap'}
}
self.domain_config_api.create_config(CONF.identity.default_domain_id,
@@ -2725,6 +2887,48 @@ class MultiLDAPandSQLIdentityDomainConfigsInSQL(MultiLDAPandSQLIdentity):
CONF.identity.default_domain_id))
self.assertEqual(CONF.ldap.url, default_config.ldap.url)
+ def test_reloading_domain_config(self):
+ """Ensure domain drivers are reloaded on a config modification."""
+
+ domain_cfgs = self.identity_api.domain_configs
+
+ # Create a new config for the default domain, hence overwriting the
+ # current settings.
+ new_config = {
+ 'ldap': {'url': uuid.uuid4().hex},
+ 'identity': {'driver': 'ldap'}}
+ self.domain_config_api.create_config(
+ CONF.identity.default_domain_id, new_config)
+ default_config = (
+ domain_cfgs.get_domain_conf(CONF.identity.default_domain_id))
+ self.assertEqual(new_config['ldap']['url'], default_config.ldap.url)
+
+ # Ensure updating is also honored
+ updated_config = {'url': uuid.uuid4().hex}
+ self.domain_config_api.update_config(
+ CONF.identity.default_domain_id, updated_config,
+ group='ldap', option='url')
+ default_config = (
+ domain_cfgs.get_domain_conf(CONF.identity.default_domain_id))
+ self.assertEqual(updated_config['url'], default_config.ldap.url)
+
+ # ...and finally ensure delete causes the driver to get the standard
+ # config again.
+ self.domain_config_api.delete_config(CONF.identity.default_domain_id)
+ default_config = (
+ domain_cfgs.get_domain_conf(CONF.identity.default_domain_id))
+ self.assertEqual(CONF.ldap.url, default_config.ldap.url)
+
+ def test_setting_sql_driver_raises_exception(self):
+ """Ensure setting of domain specific sql driver is prevented."""
+
+ new_config = {'identity': {'driver': 'sql'}}
+ self.domain_config_api.create_config(
+ CONF.identity.default_domain_id, new_config)
+ self.assertRaises(exception.InvalidDomainConfig,
+ self.identity_api.domain_configs.get_domain_conf,
+ CONF.identity.default_domain_id)
+
class DomainSpecificLDAPandSQLIdentity(
BaseLDAPIdentity, tests.SQLDriverOverrides, tests.TestCase,
@@ -2740,11 +2944,11 @@ class DomainSpecificLDAPandSQLIdentity(
"""
def setUp(self):
- self.useFixture(database.Database())
+ sqldb = self.useFixture(database.Database())
super(DomainSpecificLDAPandSQLIdentity, self).setUp()
- self.initial_setup()
+ self.initial_setup(sqldb)
- def initial_setup(self):
+ def initial_setup(self, sqldb):
# We aren't setting up any initial data ahead of switching to
# domain-specific operation, so make the switch straight away.
self.config_fixture.config(
@@ -2755,37 +2959,33 @@ class DomainSpecificLDAPandSQLIdentity(
backward_compatible_ids=False)
self.load_backends()
-
- self.engine = sql.get_engine()
- self.addCleanup(sql.cleanup)
-
- sql.ModelBase.metadata.create_all(bind=self.engine)
- self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
+ sqldb.recreate()
self.domain_count = 2
self.domain_specific_count = 2
self.setup_initial_domains()
self.users = {}
- self.clear_database()
+ self.ldapdb.clear()
self.load_fixtures(default_fixtures)
self.create_users_across_domains()
+ _assert_backends(
+ self,
+ assignment='sql',
+ identity={
+ None: 'ldap',
+ 'default': 'ldap',
+ self.domains['domain1']['id']: 'sql',
+ },
+ resource='sql')
+
def config_overrides(self):
super(DomainSpecificLDAPandSQLIdentity, self).config_overrides()
# Make sure resource & assignment are actually SQL drivers,
# BaseLDAPIdentity causes this option to use LDAP.
- self.config_fixture.config(
- group='resource',
- driver='keystone.resource.backends.sql.Resource')
- self.config_fixture.config(
- group='assignment',
- driver='keystone.assignment.backends.sql.Assignment')
-
- def reload_backends(self, domain_id):
- # Just reload the driver for this domain - which will pickup
- # any updated cfg
- self.identity_api.domain_configs.reload_domain_driver(domain_id)
+ self.config_fixture.config(group='resource', driver='sql')
+ self.config_fixture.config(group='assignment', driver='sql')
def get_config(self, domain_id):
# Get the config for this domain, will return CONF
@@ -2889,6 +3089,11 @@ class DomainSpecificLDAPandSQLIdentity(
self.skipTest("Doesn't apply since LDAP configuration is ignored for "
"SQL assignment backend.")
+ def test_list_role_assignments_filtered_by_role(self):
+ # Domain roles are supported by the SQL Assignment backend
+ base = super(BaseLDAPIdentity, self)
+ base.test_list_role_assignments_filtered_by_role()
+
class DomainSpecificSQLIdentity(DomainSpecificLDAPandSQLIdentity):
"""Class to test simplest use of domain-specific SQL driver.
@@ -2902,7 +3107,7 @@ class DomainSpecificSQLIdentity(DomainSpecificLDAPandSQLIdentity):
- A separate SQL backend for domain1
"""
- def initial_setup(self):
+ def initial_setup(self, sqldb):
# We aren't setting up any initial data ahead of switching to
# domain-specific operation, so make the switch straight away.
self.config_fixture.config(
@@ -2916,12 +3121,7 @@ class DomainSpecificSQLIdentity(DomainSpecificLDAPandSQLIdentity):
backward_compatible_ids=True)
self.load_backends()
-
- self.engine = sql.get_engine()
- self.addCleanup(sql.cleanup)
-
- sql.ModelBase.metadata.create_all(bind=self.engine)
- self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
+ sqldb.recreate()
self.domain_count = 2
self.domain_specific_count = 1
@@ -2931,17 +3131,16 @@ class DomainSpecificSQLIdentity(DomainSpecificLDAPandSQLIdentity):
self.load_fixtures(default_fixtures)
self.create_users_across_domains()
+ _assert_backends(self,
+ assignment='sql',
+ identity='ldap',
+ resource='sql')
+
def config_overrides(self):
super(DomainSpecificSQLIdentity, self).config_overrides()
- self.config_fixture.config(
- group='identity',
- driver='keystone.identity.backends.ldap.Identity')
- self.config_fixture.config(
- group='resource',
- driver='keystone.resource.backends.sql.Resource')
- self.config_fixture.config(
- group='assignment',
- driver='keystone.assignment.backends.sql.Assignment')
+ self.config_fixture.config(group='identity', driver='ldap')
+ self.config_fixture.config(group='resource', driver='sql')
+ self.config_fixture.config(group='assignment', driver='sql')
def get_config(self, domain_id):
if domain_id == CONF.identity.default_domain_id:
@@ -2949,36 +3148,20 @@ class DomainSpecificSQLIdentity(DomainSpecificLDAPandSQLIdentity):
else:
return self.identity_api.domain_configs.get_domain_conf(domain_id)
- def reload_backends(self, domain_id):
- if domain_id == CONF.identity.default_domain_id:
- self.load_backends()
- else:
- # Just reload the driver for this domain - which will pickup
- # any updated cfg
- self.identity_api.domain_configs.reload_domain_driver(domain_id)
-
def test_default_sql_plus_sql_specific_driver_fails(self):
# First confirm that if ldap is default driver, domain1 can be
# loaded as sql
- self.config_fixture.config(
- group='identity',
- driver='keystone.identity.backends.ldap.Identity')
- self.config_fixture.config(
- group='assignment',
- driver='keystone.assignment.backends.sql.Assignment')
+ self.config_fixture.config(group='identity', driver='ldap')
+ self.config_fixture.config(group='assignment', driver='sql')
self.load_backends()
# Make any identity call to initiate the lazy loading of configs
self.identity_api.list_users(
domain_scope=CONF.identity.default_domain_id)
self.assertIsNotNone(self.get_config(self.domains['domain1']['id']))
- # Now re-initialize, but with sql as the default identity driver
- self.config_fixture.config(
- group='identity',
- driver='keystone.identity.backends.sql.Identity')
- self.config_fixture.config(
- group='assignment',
- driver='keystone.assignment.backends.sql.Assignment')
+ # Now re-initialize, but with sql as the identity driver
+ self.config_fixture.config(group='identity', driver='sql')
+ self.config_fixture.config(group='assignment', driver='sql')
self.load_backends()
# Make any identity call to initiate the lazy loading of configs, which
# should fail since we would now have two sql drivers.
@@ -2987,12 +3170,8 @@ class DomainSpecificSQLIdentity(DomainSpecificLDAPandSQLIdentity):
domain_scope=CONF.identity.default_domain_id)
def test_multiple_sql_specific_drivers_fails(self):
- self.config_fixture.config(
- group='identity',
- driver='keystone.identity.backends.ldap.Identity')
- self.config_fixture.config(
- group='assignment',
- driver='keystone.assignment.backends.sql.Assignment')
+ self.config_fixture.config(group='identity', driver='ldap')
+ self.config_fixture.config(group='assignment', driver='sql')
self.load_backends()
# Ensure default, domain1 and domain2 exist
self.domain_count = 3
@@ -3019,31 +3198,30 @@ class LdapFilterTests(test_backend.FilterTests, tests.TestCase):
def setUp(self):
super(LdapFilterTests, self).setUp()
- self.useFixture(database.Database())
- self.clear_database()
+ sqldb = self.useFixture(database.Database())
+ self.useFixture(ldapdb.LDAPDatabase())
- common_ldap.register_handler('fake://', fakeldap.FakeLdap)
self.load_backends()
self.load_fixtures(default_fixtures)
-
- self.engine = sql.get_engine()
- self.addCleanup(sql.cleanup)
- sql.ModelBase.metadata.create_all(bind=self.engine)
-
- self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
- self.addCleanup(common_ldap_core._HANDLERS.clear)
+ sqldb.recreate()
+ _assert_backends(self, assignment='ldap', identity='ldap')
def config_overrides(self):
super(LdapFilterTests, self).config_overrides()
- self.config_fixture.config(
- group='identity',
- driver='keystone.identity.backends.ldap.Identity')
+ self.config_fixture.config(group='identity', driver='ldap')
def config_files(self):
config_files = super(LdapFilterTests, self).config_files()
config_files.append(tests.dirs.tests_conf('backend_ldap.conf'))
return config_files
- def clear_database(self):
- for shelf in fakeldap.FakeShelves:
- fakeldap.FakeShelves[shelf].clear()
+ def test_list_users_in_group_filtered(self):
+ # The LDAP identity driver currently does not support filtering on the
+ # listing users for a given group, so will fail this test.
+ try:
+ super(LdapFilterTests, self).test_list_users_in_group_filtered()
+ except matchers.MismatchError:
+ return
+ # We shouldn't get here...if we do, it means someone has implemented
+ # filtering, so we can remove this test override.
+ self.assertTrue(False)
diff --git a/keystone-moon/keystone/tests/unit/test_backend_ldap_pool.py b/keystone-moon/keystone/tests/unit/test_backend_ldap_pool.py
index eee03b8b..66827d7e 100644
--- a/keystone-moon/keystone/tests/unit/test_backend_ldap_pool.py
+++ b/keystone-moon/keystone/tests/unit/test_backend_ldap_pool.py
@@ -210,9 +210,7 @@ class LdapPoolCommonTestMixin(object):
class LdapIdentitySqlAssignment(LdapPoolCommonTestMixin,
test_backend_ldap.LdapIdentitySqlAssignment,
tests.TestCase):
- '''Executes existing base class 150+ tests with pooled LDAP handler to make
- sure it works without any error.
- '''
+ """Executes tests in existing base class with pooled LDAP handler."""
def setUp(self):
self.useFixture(mockpatch.PatchObject(
ldap_core.PooledLDAPHandler, 'Connector', fakeldap.FakeLdapPool))
diff --git a/keystone-moon/keystone/tests/unit/test_backend_rules.py b/keystone-moon/keystone/tests/unit/test_backend_rules.py
index c9c4f151..bc0dc13d 100644
--- a/keystone-moon/keystone/tests/unit/test_backend_rules.py
+++ b/keystone-moon/keystone/tests/unit/test_backend_rules.py
@@ -25,9 +25,7 @@ class RulesPolicy(tests.TestCase, test_backend.PolicyTests):
def config_overrides(self):
super(RulesPolicy, self).config_overrides()
- self.config_fixture.config(
- group='policy',
- driver='keystone.policy.backends.rules.Policy')
+ self.config_fixture.config(group='policy', driver='rules')
def test_create(self):
self.assertRaises(exception.NotImplemented,
diff --git a/keystone-moon/keystone/tests/unit/test_backend_sql.py b/keystone-moon/keystone/tests/unit/test_backend_sql.py
index a7c63bf6..bf50ac21 100644
--- a/keystone-moon/keystone/tests/unit/test_backend_sql.py
+++ b/keystone-moon/keystone/tests/unit/test_backend_sql.py
@@ -20,6 +20,7 @@ import mock
from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_db import options
+from six.moves import range
import sqlalchemy
from sqlalchemy import exc
from testtools import matchers
@@ -28,7 +29,6 @@ from keystone.common import driver_hints
from keystone.common import sql
from keystone import exception
from keystone.identity.backends import sql as identity_sql
-from keystone.openstack.common import versionutils
from keystone.tests import unit as tests
from keystone.tests.unit import default_fixtures
from keystone.tests.unit.ksfixtures import database
@@ -67,18 +67,67 @@ class SqlModels(SqlTests):
s = sqlalchemy.select([table])
return s
- def assertExpectedSchema(self, table, cols):
+ def assertExpectedSchema(self, table, expected_schema):
+ """Assert that a table's schema is what we expect.
+
+ :param string table: the name of the table to inspect
+ :param tuple expected_schema: a tuple of tuples containing the
+ expected schema
+ :raises AssertionError: when the database schema doesn't match the
+ expected schema
+
+ The expected_schema format is simply::
+
+ (
+ ('column name', sql type, qualifying detail),
+ ...
+ )
+
+ The qualifying detail varies based on the type of the column::
+
+ - sql.Boolean columns must indicate the column's default value or
+ None if there is no default
+ - Columns with a length, like sql.String, must indicate the
+ column's length
+ - All other column types should use None
+
+ Example::
+
+ cols = (('id', sql.String, 64),
+ ('enabled', sql.Boolean, True),
+ ('extra', sql.JsonBlob, None))
+ self.assertExpectedSchema('table_name', cols)
+
+ """
table = self.select_table(table)
- for col, type_, length in cols:
- self.assertIsInstance(table.c[col].type, type_)
- if length:
- self.assertEqual(length, table.c[col].type.length)
+
+ actual_schema = []
+ for column in table.c:
+ if isinstance(column.type, sql.Boolean):
+ default = None
+ if column._proxies[0].default:
+ default = column._proxies[0].default.arg
+ actual_schema.append((column.name, type(column.type), default))
+ elif (hasattr(column.type, 'length') and
+ not isinstance(column.type, sql.Enum)):
+ # NOTE(dstanek): Even though sql.Enum columns have a length
+ # set we don't want to catch them here. Maybe in the future
+ # we'll check to see that they contain a list of the correct
+ # possible values.
+ actual_schema.append((column.name,
+ type(column.type),
+ column.type.length))
+ else:
+ actual_schema.append((column.name, type(column.type), None))
+
+ self.assertItemsEqual(expected_schema, actual_schema)
def test_user_model(self):
cols = (('id', sql.String, 64),
('name', sql.String, 255),
('password', sql.String, 128),
('domain_id', sql.String, 64),
+ ('default_project_id', sql.String, 64),
('enabled', sql.Boolean, None),
('extra', sql.JsonBlob, None))
self.assertExpectedSchema('user', cols)
@@ -94,7 +143,8 @@ class SqlModels(SqlTests):
def test_domain_model(self):
cols = (('id', sql.String, 64),
('name', sql.String, 64),
- ('enabled', sql.Boolean, None))
+ ('enabled', sql.Boolean, True),
+ ('extra', sql.JsonBlob, None))
self.assertExpectedSchema('domain', cols)
def test_project_model(self):
@@ -104,7 +154,8 @@ class SqlModels(SqlTests):
('domain_id', sql.String, 64),
('enabled', sql.Boolean, None),
('extra', sql.JsonBlob, None),
- ('parent_id', sql.String, 64))
+ ('parent_id', sql.String, 64),
+ ('is_domain', sql.Boolean, False))
self.assertExpectedSchema('project', cols)
def test_role_assignment_model(self):
@@ -692,6 +743,9 @@ class SqlTokenCacheInvalidation(SqlTests, test_backend.TokenCacheInvalidation):
class SqlFilterTests(SqlTests, test_backend.FilterTests):
+ def _get_user_name_field_size(self):
+ return identity_sql.User.name.type.length
+
def clean_up_entities(self):
"""Clean up entity test data from Filter Test Cases."""
@@ -761,21 +815,6 @@ class SqlFilterTests(SqlTests, test_backend.FilterTests):
groups = self.identity_api.list_groups()
self.assertTrue(len(groups) > 0)
- def test_groups_for_user_filtered(self):
- # The SQL identity driver currently does not support filtering on the
- # listing groups for a given user, so will fail this test. This is
- # raised as bug #1412447.
- try:
- super(SqlFilterTests, self).test_groups_for_user_filtered()
- except matchers.MismatchError:
- return
- # We shouldn't get here...if we do, it means someone has fixed the
- # above defect, so we can remove this test override. As an aside, it
- # would be nice to have used self.assertRaises() around the call above
- # to achieve the logic here...but that does not seem to work when
- # wrapping another assert (it won't seem to catch the error).
- self.assertTrue(False)
-
class SqlLimitTests(SqlTests, test_backend.LimitTests):
def setUp(self):
@@ -881,68 +920,3 @@ class SqlCredential(SqlTests):
credentials = self.credential_api.list_credentials_for_user(
self.user_foo['id'])
self._validateCredentialList(credentials, self.user_credentials)
-
-
-class DeprecatedDecorators(SqlTests):
-
- def test_assignment_to_role_api(self):
- """Test that calling one of the methods does call LOG.deprecated.
-
- This method is really generic to the type of backend, but we need
- one to execute the test, so the SQL backend is as good as any.
-
- """
-
- # Rather than try and check that a log message is issued, we
- # enable fatal_deprecations so that we can check for the
- # raising of the exception.
-
- # First try to create a role without enabling fatal deprecations,
- # which should work due to the cross manager deprecated calls.
- role_ref = {
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex}
- self.assignment_api.create_role(role_ref['id'], role_ref)
- self.role_api.get_role(role_ref['id'])
-
- # Now enable fatal exceptions - creating a role by calling the
- # old manager should now fail.
- self.config_fixture.config(fatal_deprecations=True)
- role_ref = {
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex}
- self.assertRaises(versionutils.DeprecatedConfig,
- self.assignment_api.create_role,
- role_ref['id'], role_ref)
-
- def test_assignment_to_resource_api(self):
- """Test that calling one of the methods does call LOG.deprecated.
-
- This method is really generic to the type of backend, but we need
- one to execute the test, so the SQL backend is as good as any.
-
- """
-
- # Rather than try and check that a log message is issued, we
- # enable fatal_deprecations so that we can check for the
- # raising of the exception.
-
- # First try to create a project without enabling fatal deprecations,
- # which should work due to the cross manager deprecated calls.
- project_ref = {
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'domain_id': DEFAULT_DOMAIN_ID}
- self.resource_api.create_project(project_ref['id'], project_ref)
- self.resource_api.get_project(project_ref['id'])
-
- # Now enable fatal exceptions - creating a project by calling the
- # old manager should now fail.
- self.config_fixture.config(fatal_deprecations=True)
- project_ref = {
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'domain_id': DEFAULT_DOMAIN_ID}
- self.assertRaises(versionutils.DeprecatedConfig,
- self.assignment_api.create_project,
- project_ref['id'], project_ref)
diff --git a/keystone-moon/keystone/tests/unit/test_backend_templated.py b/keystone-moon/keystone/tests/unit/test_backend_templated.py
index a1c15fb1..82a8bed8 100644
--- a/keystone-moon/keystone/tests/unit/test_backend_templated.py
+++ b/keystone-moon/keystone/tests/unit/test_backend_templated.py
@@ -12,18 +12,20 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
import uuid
+import mock
+from six.moves import zip
+
+from keystone import catalog
from keystone.tests import unit as tests
from keystone.tests.unit import default_fixtures
from keystone.tests.unit.ksfixtures import database
from keystone.tests.unit import test_backend
-DEFAULT_CATALOG_TEMPLATES = os.path.abspath(os.path.join(
- os.path.dirname(__file__),
- 'default_catalog.templates'))
+BROKEN_WRITE_FUNCTIONALITY_MSG = ("Templated backend doesn't correctly "
+ "implement write operations")
class TestTemplatedCatalog(tests.TestCase, test_backend.CatalogTests):
@@ -55,8 +57,10 @@ class TestTemplatedCatalog(tests.TestCase, test_backend.CatalogTests):
def config_overrides(self):
super(TestTemplatedCatalog, self).config_overrides()
- self.config_fixture.config(group='catalog',
- template_file=DEFAULT_CATALOG_TEMPLATES)
+ self.config_fixture.config(
+ group='catalog',
+ driver='templated',
+ template_file=tests.dirs.tests('default_catalog.templates'))
def test_get_catalog(self):
catalog_ref = self.catalog_api.get_catalog('foo', 'bar')
@@ -120,8 +124,116 @@ class TestTemplatedCatalog(tests.TestCase, test_backend.CatalogTests):
'id': '1'}]
self.assert_catalogs_equal(exp_catalog, catalog_ref)
+ def test_get_catalog_ignores_endpoints_with_invalid_urls(self):
+ user_id = uuid.uuid4().hex
+ # If the URL has no 'tenant_id' to substitute, we will skip the
+ # endpoint which contains this kind of URL.
+ catalog_ref = self.catalog_api.get_v3_catalog(user_id, tenant_id=None)
+ exp_catalog = [
+ {'endpoints': [],
+ 'type': 'compute',
+ 'name': "'Compute Service'",
+ 'id': '2'},
+ {'endpoints': [
+ {'interface': 'admin',
+ 'region': 'RegionOne',
+ 'url': 'http://localhost:35357/v2.0'},
+ {'interface': 'public',
+ 'region': 'RegionOne',
+ 'url': 'http://localhost:5000/v2.0'},
+ {'interface': 'internal',
+ 'region': 'RegionOne',
+ 'url': 'http://localhost:35357/v2.0'}],
+ 'type': 'identity',
+ 'name': "'Identity Service'",
+ 'id': '1'}]
+ self.assert_catalogs_equal(exp_catalog, catalog_ref)
+
def test_list_regions_filtered_by_parent_region_id(self):
self.skipTest('Templated backend does not support hints')
def test_service_filtering(self):
self.skipTest("Templated backend doesn't support filtering")
+
+ # NOTE(dstanek): the following methods have been overridden
+ # from test_backend.CatalogTests
+
+ def test_region_crud(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ @tests.skip_if_cache_disabled('catalog')
+ def test_cache_layer_region_crud(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ @tests.skip_if_cache_disabled('catalog')
+ def test_invalidate_cache_when_updating_region(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ def test_create_region_with_duplicate_id(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ def test_delete_region_404(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ def test_create_region_invalid_parent_region_404(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ def test_avoid_creating_circular_references_in_regions_update(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ @mock.patch.object(catalog.Driver,
+ "_ensure_no_circle_in_hierarchical_regions")
+ def test_circular_regions_can_be_deleted(self, mock_ensure_on_circle):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ def test_service_crud(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ @tests.skip_if_cache_disabled('catalog')
+ def test_cache_layer_service_crud(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ @tests.skip_if_cache_disabled('catalog')
+ def test_invalidate_cache_when_updating_service(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ def test_delete_service_with_endpoint(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ def test_cache_layer_delete_service_with_endpoint(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ def test_delete_service_404(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ def test_update_endpoint_nonexistent_service(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ def test_create_endpoint_nonexistent_region(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ def test_update_endpoint_nonexistent_region(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ def test_get_endpoint_404(self):
+ self.skipTest("Templated backend doesn't use IDs for endpoints.")
+
+ def test_delete_endpoint_404(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ def test_create_endpoint(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ def test_update_endpoint(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
+
+ def test_list_endpoints(self):
+ # NOTE(dstanek): a future commit will fix this functionality and
+ # this test
+ expected_ids = set()
+ endpoints = self.catalog_api.list_endpoints()
+ self.assertEqual(expected_ids, set(e['id'] for e in endpoints))
+
+ @tests.skip_if_cache_disabled('catalog')
+ def test_invalidate_cache_when_updating_endpoint(self):
+ self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
diff --git a/keystone-moon/keystone/tests/unit/test_cache.py b/keystone-moon/keystone/tests/unit/test_cache.py
index 5a778a07..c60df877 100644
--- a/keystone-moon/keystone/tests/unit/test_cache.py
+++ b/keystone-moon/keystone/tests/unit/test_cache.py
@@ -47,10 +47,12 @@ def _copy_value(value):
# backend unless you are running tests or expecting odd/strange results.
class CacheIsolatingProxy(proxy.ProxyBackend):
"""Proxy that forces a memory copy of stored values.
- The default in-memory cache-region does not perform a copy on values it
- is meant to cache. Therefore if the value is modified after set or after
- get, the cached value also is modified. This proxy does a copy as the last
+
+ The default in-memory cache-region does not perform a copy on values it is
+ meant to cache. Therefore if the value is modified after set or after get,
+ the cached value also is modified. This proxy does a copy as the last
thing before storing data.
+
"""
def get(self, key):
return _copy_value(self.proxied.get(key))
diff --git a/keystone-moon/keystone/tests/unit/test_cache_backend_mongo.py b/keystone-moon/keystone/tests/unit/test_cache_backend_mongo.py
index a56bf754..369570d6 100644
--- a/keystone-moon/keystone/tests/unit/test_cache_backend_mongo.py
+++ b/keystone-moon/keystone/tests/unit/test_cache_backend_mongo.py
@@ -20,6 +20,7 @@ import uuid
from dogpile.cache import api
from dogpile.cache import region as dp_region
import six
+from six.moves import range
from keystone.common.cache.backends import mongo
from keystone import exception
@@ -139,13 +140,13 @@ class MockCollection(object):
if self._apply_filter(document, spec))
def _apply_filter(self, document, query):
- for key, search in six.iteritems(query):
+ for key, search in query.items():
doc_val = document.get(key)
if isinstance(search, dict):
op_dict = {'$in': lambda dv, sv: dv in sv}
is_match = all(
op_str in op_dict and op_dict[op_str](doc_val, search_val)
- for op_str, search_val in six.iteritems(search)
+ for op_str, search_val in search.items()
)
else:
is_match = doc_val == search
@@ -160,7 +161,7 @@ class MockCollection(object):
return new
if isinstance(obj, dict):
new = container()
- for key, value in obj.items():
+ for key, value in list(obj.items()):
new[key] = self._copy_doc(value, container)
return new
else:
@@ -198,7 +199,7 @@ class MockCollection(object):
existing_doc = self._documents[self._insert(document)]
def _internalize_dict(self, d):
- return {k: copy.deepcopy(v) for k, v in six.iteritems(d)}
+ return {k: copy.deepcopy(v) for k, v in d.items()}
def remove(self, spec_or_id=None, search_filter=None):
"""Remove objects matching spec_or_id from the collection."""
diff --git a/keystone-moon/keystone/tests/unit/test_catalog.py b/keystone-moon/keystone/tests/unit/test_catalog.py
index 9dda5d83..4e7f4037 100644
--- a/keystone-moon/keystone/tests/unit/test_catalog.py
+++ b/keystone-moon/keystone/tests/unit/test_catalog.py
@@ -14,8 +14,6 @@
import uuid
-import six
-
from keystone import catalog
from keystone.tests import unit as tests
from keystone.tests.unit.ksfixtures import database
@@ -47,9 +45,7 @@ class V2CatalogTestCase(rest.RestfulTestCase):
def config_overrides(self):
super(V2CatalogTestCase, self).config_overrides()
- self.config_fixture.config(
- group='catalog',
- driver='keystone.catalog.backends.sql.Catalog')
+ self.config_fixture.config(group='catalog', driver='sql')
def new_ref(self):
"""Populates a ref with attributes common to all API entities."""
@@ -95,7 +91,7 @@ class V2CatalogTestCase(rest.RestfulTestCase):
req_body, response = self._endpoint_create()
self.assertIn('endpoint', response.result)
self.assertIn('id', response.result['endpoint'])
- for field, value in six.iteritems(req_body['endpoint']):
+ for field, value in req_body['endpoint'].items():
self.assertEqual(response.result['endpoint'][field], value)
def test_endpoint_create_with_null_adminurl(self):
@@ -130,6 +126,92 @@ class V2CatalogTestCase(rest.RestfulTestCase):
def test_endpoint_create_with_empty_service_id(self):
self._endpoint_create(expected_status=400, service_id='')
+ def test_endpoint_create_with_valid_url(self):
+ """Test that endpoint creation succeeds with a valid URL."""
+ # listing one valid URL is enough; no need to test many
+ valid_url = 'http://127.0.0.1:8774/v1.1/$(tenant_id)s'
+
+ # baseline test that all valid URLs work
+ self._endpoint_create(expected_status=200,
+ publicurl=valid_url,
+ internalurl=valid_url,
+ adminurl=valid_url)
+
+ def test_endpoint_create_with_invalid_url(self):
+ """Test invalid cases where the substitution is not exactly right."""
+ invalid_urls = [
+ # using a substitution that is not whitelisted - KeyError
+ 'http://127.0.0.1:8774/v1.1/$(nonexistent)s',
+
+ # invalid formatting - ValueError
+ 'http://127.0.0.1:8774/v1.1/$(tenant_id)',
+ 'http://127.0.0.1:8774/v1.1/$(tenant_id)t',
+ 'http://127.0.0.1:8774/v1.1/$(tenant_id',
+
+ # invalid type specifier - TypeError
+ # admin_url is a string not an int
+ 'http://127.0.0.1:8774/v1.1/$(admin_url)d',
+ ]
+
+ # listing one valid URL is enough; no need to test many
+ valid_url = 'http://127.0.0.1:8774/v1.1/$(tenant_id)s'
+
+ # Case one: publicurl, internalurl and adminurl are
+ # all invalid
+ for invalid_url in invalid_urls:
+ self._endpoint_create(expected_status=400,
+ publicurl=invalid_url,
+ internalurl=invalid_url,
+ adminurl=invalid_url)
+
+ # Case two: publicurl, internalurl are invalid
+ # and adminurl is valid
+ for invalid_url in invalid_urls:
+ self._endpoint_create(expected_status=400,
+ publicurl=invalid_url,
+ internalurl=invalid_url,
+ adminurl=valid_url)
+
+ # Case three: publicurl, adminurl are invalid
+ # and internalurl is valid
+ for invalid_url in invalid_urls:
+ self._endpoint_create(expected_status=400,
+ publicurl=invalid_url,
+ internalurl=valid_url,
+ adminurl=invalid_url)
+
+ # Case four: internalurl, adminurl are invalid
+ # and publicurl is valid
+ for invalid_url in invalid_urls:
+ self._endpoint_create(expected_status=400,
+ publicurl=valid_url,
+ internalurl=invalid_url,
+ adminurl=invalid_url)
+
+ # Case five: publicurl is invalid, internalurl
+ # and adminurl are valid
+ for invalid_url in invalid_urls:
+ self._endpoint_create(expected_status=400,
+ publicurl=invalid_url,
+ internalurl=valid_url,
+ adminurl=valid_url)
+
+ # Case six: internalurl is invalid, publicurl
+ # and adminurl are valid
+ for invalid_url in invalid_urls:
+ self._endpoint_create(expected_status=400,
+ publicurl=valid_url,
+ internalurl=invalid_url,
+ adminurl=valid_url)
+
+ # Case seven: adminurl is invalid, publicurl
+ # and internalurl are valid
+ for invalid_url in invalid_urls:
+ self._endpoint_create(expected_status=400,
+ publicurl=valid_url,
+ internalurl=valid_url,
+ adminurl=invalid_url)
+
class TestV2CatalogAPISQL(tests.TestCase):
@@ -147,9 +229,7 @@ class TestV2CatalogAPISQL(tests.TestCase):
def config_overrides(self):
super(TestV2CatalogAPISQL, self).config_overrides()
- self.config_fixture.config(
- group='catalog',
- driver='keystone.catalog.backends.sql.Catalog')
+ self.config_fixture.config(group='catalog', driver='sql')
def new_endpoint_ref(self, service_id):
return {
diff --git a/keystone-moon/keystone/tests/unit/test_cert_setup.py b/keystone-moon/keystone/tests/unit/test_cert_setup.py
index d1e9ccfd..3d300810 100644
--- a/keystone-moon/keystone/tests/unit/test_cert_setup.py
+++ b/keystone-moon/keystone/tests/unit/test_cert_setup.py
@@ -68,9 +68,7 @@ class CertSetupTestCase(rest.RestfulTestCase):
ca_certs=ca_certs,
certfile=os.path.join(CERTDIR, 'keystone.pem'),
keyfile=os.path.join(KEYDIR, 'keystonekey.pem'))
- self.config_fixture.config(
- group='token',
- provider='keystone.token.providers.pkiz.Provider')
+ self.config_fixture.config(group='token', provider='pkiz')
def test_can_handle_missing_certs(self):
controller = token.controllers.Auth()
diff --git a/keystone-moon/keystone/tests/unit/test_cli.py b/keystone-moon/keystone/tests/unit/test_cli.py
index 20aa03e6..3f37612e 100644
--- a/keystone-moon/keystone/tests/unit/test_cli.py
+++ b/keystone-moon/keystone/tests/unit/test_cli.py
@@ -17,14 +17,16 @@ import uuid
import mock
from oslo_config import cfg
+from six.moves import range
-from keystone import cli
+from keystone.cmd import cli
from keystone.common import dependency
from keystone.i18n import _
from keystone import resource
from keystone.tests import unit as tests
from keystone.tests.unit.ksfixtures import database
+
CONF = cfg.CONF
@@ -103,14 +105,14 @@ class CliDomainConfigAllTestCase(tests.SQLDriverOverrides, tests.TestCase):
'user': 'cn=Admin',
'password': 'password',
'suffix': 'cn=example,cn=com'},
- 'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
+ 'identity': {'driver': 'ldap'}
}
domain1_config = {
'ldap': {'url': 'fake://memory1',
'user': 'cn=Admin',
'password': 'password',
'suffix': 'cn=example,cn=com'},
- 'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
+ 'identity': {'driver': 'ldap'}
}
domain2_config = {
'ldap': {'url': 'fake://memory',
@@ -119,7 +121,7 @@ class CliDomainConfigAllTestCase(tests.SQLDriverOverrides, tests.TestCase):
'suffix': 'cn=myroot,cn=com',
'group_tree_dn': 'ou=UserGroups,dc=myroot,dc=org',
'user_tree_dn': 'ou=Users,dc=myroot,dc=org'},
- 'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
+ 'identity': {'driver': 'ldap'}
}
# Clear backend dependencies, since cli loads these manually
@@ -151,7 +153,7 @@ class CliDomainConfigSingleDomainTestCase(CliDomainConfigAllTestCase):
'user': 'cn=Admin',
'password': 'password',
'suffix': 'cn=example,cn=com'},
- 'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
+ 'identity': {'driver': 'ldap'}
}
# Clear backend dependencies, since cli loads these manually
@@ -172,7 +174,7 @@ class CliDomainConfigSingleDomainTestCase(CliDomainConfigAllTestCase):
# Create a config for the default domain
default_config = {
'ldap': {'url': uuid.uuid4().hex},
- 'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
+ 'identity': {'driver': 'ldap'}
}
self.domain_config_api.create_config(
CONF.identity.default_domain_id, default_config)
diff --git a/keystone-moon/keystone/tests/unit/test_config.py b/keystone-moon/keystone/tests/unit/test_config.py
index 15cfac81..431f9965 100644
--- a/keystone-moon/keystone/tests/unit/test_config.py
+++ b/keystone-moon/keystone/tests/unit/test_config.py
@@ -46,10 +46,8 @@ class ConfigTestCase(tests.TestCase):
config.find_paste_config())
def test_config_default(self):
- self.assertEqual('keystone.auth.plugins.password.Password',
- CONF.auth.password)
- self.assertEqual('keystone.auth.plugins.token.Token',
- CONF.auth.token)
+ self.assertIs(None, CONF.auth.password)
+ self.assertIs(None, CONF.auth.token)
class DeprecatedTestCase(tests.TestCase):
diff --git a/keystone-moon/keystone/tests/unit/test_contrib_ec2.py b/keystone-moon/keystone/tests/unit/test_contrib_ec2.py
new file mode 100644
index 00000000..c6717dc5
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/test_contrib_ec2.py
@@ -0,0 +1,208 @@
+# Copyright 2015 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from keystoneclient.contrib.ec2 import utils as ec2_utils
+
+from keystone.contrib.ec2 import controllers
+from keystone import exception
+from keystone.tests import unit as tests
+from keystone.tests.unit import default_fixtures
+from keystone.tests.unit.ksfixtures import database
+
+
+class TestCredentialEc2(tests.TestCase):
+ # TODO(davechen): more testcases for ec2 credential are expected here and
+ # the file name would be renamed to "test_credential" to correspond with
+ # "test_v3_credential.py".
+ def setUp(self):
+ super(TestCredentialEc2, self).setUp()
+ self.useFixture(database.Database())
+ self.load_backends()
+ self.load_fixtures(default_fixtures)
+ self.user_id = self.user_foo['id']
+ self.project_id = self.tenant_bar['id']
+ self.blob = {'access': uuid.uuid4().hex,
+ 'secret': uuid.uuid4().hex}
+ self.controller = controllers.Ec2Controller()
+ self.creds_ref = {'user_id': self.user_id,
+ 'tenant_id': self.project_id,
+ 'access': self.blob['access'],
+ 'secret': self.blob['secret'],
+ 'trust_id': None}
+
+ def test_signature_validate_no_host_port(self):
+ """Test signature validation with the access/secret provided."""
+ access = self.blob['access']
+ secret = self.blob['secret']
+ signer = ec2_utils.Ec2Signer(secret)
+ params = {'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': '2',
+ 'AWSAccessKeyId': access}
+ request = {'host': 'foo',
+ 'verb': 'GET',
+ 'path': '/bar',
+ 'params': params}
+ signature = signer.generate(request)
+
+ sig_ref = {'access': access,
+ 'signature': signature,
+ 'host': 'foo',
+ 'verb': 'GET',
+ 'path': '/bar',
+ 'params': params}
+
+ # Now validate the signature based on the dummy request
+ self.assertTrue(self.controller.check_signature(self.creds_ref,
+ sig_ref))
+
+ def test_signature_validate_with_host_port(self):
+ """Test signature validation when host is bound with port.
+
+ Host is bound with a port; generally the port here is not the
+ standard port for the protocol (like '80' for HTTP or 443 for
+ HTTPS), so the port is not omitted by the client library.
+ """
+ access = self.blob['access']
+ secret = self.blob['secret']
+ signer = ec2_utils.Ec2Signer(secret)
+ params = {'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': '2',
+ 'AWSAccessKeyId': access}
+ request = {'host': 'foo:8181',
+ 'verb': 'GET',
+ 'path': '/bar',
+ 'params': params}
+ signature = signer.generate(request)
+
+ sig_ref = {'access': access,
+ 'signature': signature,
+ 'host': 'foo:8181',
+ 'verb': 'GET',
+ 'path': '/bar',
+ 'params': params}
+
+ # Now validate the signature based on the dummy request
+ self.assertTrue(self.controller.check_signature(self.creds_ref,
+ sig_ref))
+
+ def test_signature_validate_with_missed_host_port(self):
+ """Test signature validation when host is bound with well-known port.
+
+ Host is bound with a port, but the port is a well-known port like
+ '80' for HTTP or 443 for HTTPS; sometimes the client library omits
+ the port from the signature but then makes the request with it.
+ see (How to create the string to sign): 'http://docs.aws.amazon.com/
+ general/latest/gr/signature-version-2.html'.
+
+ Since "credentials['host']" is not set by the client library but is
+ taken from "req.host", the two host values can differ.
+ """
+ access = self.blob['access']
+ secret = self.blob['secret']
+ signer = ec2_utils.Ec2Signer(secret)
+ params = {'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': '2',
+ 'AWSAccessKeyId': access}
+ # Omit the port to generate the signature.
+ cnt_req = {'host': 'foo',
+ 'verb': 'GET',
+ 'path': '/bar',
+ 'params': params}
+ signature = signer.generate(cnt_req)
+
+ sig_ref = {'access': access,
+ 'signature': signature,
+ 'host': 'foo:8080',
+ 'verb': 'GET',
+ 'path': '/bar',
+ 'params': params}
+
+ # Now validate the signature based on the dummy request
+ # Check the signature again after omitting the port.
+ self.assertTrue(self.controller.check_signature(self.creds_ref,
+ sig_ref))
+
+ def test_signature_validate_no_signature(self):
+ """Signature is not presented in signature reference data."""
+ access = self.blob['access']
+ params = {'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': '2',
+ 'AWSAccessKeyId': access}
+
+ sig_ref = {'access': access,
+ 'signature': None,
+ 'host': 'foo:8080',
+ 'verb': 'GET',
+ 'path': '/bar',
+ 'params': params}
+
+ creds_ref = {'user_id': self.user_id,
+ 'tenant_id': self.project_id,
+ 'access': self.blob['access'],
+ 'secret': self.blob['secret'],
+ 'trust_id': None
+ }
+
+ # Now validate the signature based on the dummy request
+ self.assertRaises(exception.Unauthorized,
+ self.controller.check_signature,
+ creds_ref, sig_ref)
+
+ def test_signature_validate_invalid_signature(self):
+ """Signature is not signed on the correct data."""
+ access = self.blob['access']
+ secret = self.blob['secret']
+ signer = ec2_utils.Ec2Signer(secret)
+ params = {'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': '2',
+ 'AWSAccessKeyId': access}
+ request = {'host': 'bar',
+ 'verb': 'GET',
+ 'path': '/bar',
+ 'params': params}
+ signature = signer.generate(request)
+
+ sig_ref = {'access': access,
+ 'signature': signature,
+ 'host': 'foo:8080',
+ 'verb': 'GET',
+ 'path': '/bar',
+ 'params': params}
+
+ creds_ref = {'user_id': self.user_id,
+ 'tenant_id': self.project_id,
+ 'access': self.blob['access'],
+ 'secret': self.blob['secret'],
+ 'trust_id': None
+ }
+
+ # Now validate the signature based on the dummy request
+ self.assertRaises(exception.Unauthorized,
+ self.controller.check_signature,
+ creds_ref, sig_ref)
+
+ def test_check_non_admin_user(self):
+ """Checking if user is admin causes uncaught error.
+
+ When checking if a user is an admin, keystone.exception.Unauthorized
+ is raised but not caught if the user is not an admin.
+ """
+ # make a non-admin user
+ context = {'is_admin': False, 'token_id': uuid.uuid4().hex}
+
+ # check if user is admin
+ # no exceptions should be raised
+ self.controller._is_admin(context)
diff --git a/keystone-moon/keystone/tests/unit/test_exception.py b/keystone-moon/keystone/tests/unit/test_exception.py
index f91fa2a7..bf541dfd 100644
--- a/keystone-moon/keystone/tests/unit/test_exception.py
+++ b/keystone-moon/keystone/tests/unit/test_exception.py
@@ -87,7 +87,10 @@ class ExceptionTestCase(tests.BaseTestCase):
e = exception.ValidationError(attribute='xx',
target='Long \xe2\x80\x93 Dash')
- self.assertIn(u'\u2013', six.text_type(e))
+ if six.PY2:
+ self.assertIn(u'\u2013', six.text_type(e))
+ else:
+ self.assertIn('Long \xe2\x80\x93 Dash', six.text_type(e))
def test_invalid_unicode_string(self):
# NOTE(jamielennox): This is a complete failure case so what is
@@ -95,7 +98,12 @@ class ExceptionTestCase(tests.BaseTestCase):
# as there is an error with a message
e = exception.ValidationError(attribute='xx',
target='\xe7a va')
- self.assertIn('%(attribute)', six.text_type(e))
+
+ if six.PY2:
+ self.assertIn('%(attribute)', six.text_type(e))
+ else:
+ # There's no UnicodeDecodeError on python 3.
+ self.assertIn('\xe7a va', six.text_type(e))
class UnexpectedExceptionTestCase(ExceptionTestCase):
diff --git a/keystone-moon/keystone/tests/unit/test_hacking_checks.py b/keystone-moon/keystone/tests/unit/test_hacking_checks.py
index b9b047b3..962f5f8a 100644
--- a/keystone-moon/keystone/tests/unit/test_hacking_checks.py
+++ b/keystone-moon/keystone/tests/unit/test_hacking_checks.py
@@ -14,13 +14,13 @@ import textwrap
import mock
import pep8
-import testtools
-from keystone.hacking import checks
+from keystone.tests.hacking import checks
+from keystone.tests import unit
from keystone.tests.unit.ksfixtures import hacking as hacking_fixtures
-class BaseStyleCheck(testtools.TestCase):
+class BaseStyleCheck(unit.BaseTestCase):
def setUp(self):
super(BaseStyleCheck, self).setUp()
@@ -122,16 +122,6 @@ class TestCheckForNonDebugLoggingIssues(BaseStyleCheck):
self.assertEqual(expected_errors or [], actual_errors)
-class TestCheckOsloNamespaceImports(BaseStyleCheck):
- def get_checker(self):
- return checks.check_oslo_namespace_imports
-
- def test(self):
- code = self.code_ex.oslo_namespace_imports['code']
- errors = self.code_ex.oslo_namespace_imports['expected_errors']
- self.assert_has_errors(code, expected_errors=errors)
-
-
class TestDictConstructorWithSequenceCopy(BaseStyleCheck):
def get_checker(self):
diff --git a/keystone-moon/keystone/tests/unit/test_kvs.py b/keystone-moon/keystone/tests/unit/test_kvs.py
index 4d80ea33..77e05e6d 100644
--- a/keystone-moon/keystone/tests/unit/test_kvs.py
+++ b/keystone-moon/keystone/tests/unit/test_kvs.py
@@ -28,6 +28,7 @@ from keystone.common.kvs import core
from keystone import exception
from keystone.tests import unit as tests
+
NO_VALUE = api.NO_VALUE
@@ -487,6 +488,8 @@ class KVSTest(tests.TestCase):
memcached_expire_time=memcache_expire_time,
some_other_arg=uuid.uuid4().hex,
no_expiry_keys=[self.key_bar])
+ kvs_driver = kvs._region.backend.driver
+
# Ensure the set_arguments are correct
self.assertDictEqual(
kvs._region.backend._get_set_arguments_driver_attr(),
@@ -498,8 +501,8 @@ class KVSTest(tests.TestCase):
self.assertDictEqual(
kvs._region.backend.driver.client.set_arguments_passed,
expected_set_args)
- self.assertEqual(expected_foo_keys,
- kvs._region.backend.driver.client.keys_values.keys())
+ observed_foo_keys = list(kvs_driver.client.keys_values.keys())
+ self.assertEqual(expected_foo_keys, observed_foo_keys)
self.assertEqual(
self.value_foo,
kvs._region.backend.driver.client.keys_values[self.key_foo][0])
@@ -510,8 +513,8 @@ class KVSTest(tests.TestCase):
self.assertDictEqual(
kvs._region.backend.driver.client.set_arguments_passed,
expected_no_expiry_args)
- self.assertEqual(expected_bar_keys,
- kvs._region.backend.driver.client.keys_values.keys())
+ observed_bar_keys = list(kvs_driver.client.keys_values.keys())
+ self.assertEqual(expected_bar_keys, observed_bar_keys)
self.assertEqual(
self.value_bar,
kvs._region.backend.driver.client.keys_values[self.key_bar][0])
@@ -522,8 +525,8 @@ class KVSTest(tests.TestCase):
self.assertDictEqual(
kvs._region.backend.driver.client.set_arguments_passed,
expected_set_args)
- self.assertEqual(expected_foo_keys,
- kvs._region.backend.driver.client.keys_values.keys())
+ observed_foo_keys = list(kvs_driver.client.keys_values.keys())
+ self.assertEqual(expected_foo_keys, observed_foo_keys)
self.assertEqual(
self.value_foo,
kvs._region.backend.driver.client.keys_values[self.key_foo][0])
@@ -534,8 +537,8 @@ class KVSTest(tests.TestCase):
self.assertDictEqual(
kvs._region.backend.driver.client.set_arguments_passed,
expected_no_expiry_args)
- self.assertEqual(expected_bar_keys,
- kvs._region.backend.driver.client.keys_values.keys())
+ observed_bar_keys = list(kvs_driver.client.keys_values.keys())
+ self.assertEqual(expected_bar_keys, observed_bar_keys)
self.assertEqual(
self.value_bar,
kvs._region.backend.driver.client.keys_values[self.key_bar][0])
diff --git a/keystone-moon/keystone/tests/unit/test_ldap_livetest.py b/keystone-moon/keystone/tests/unit/test_ldap_livetest.py
index 5b449362..b9f56e8d 100644
--- a/keystone-moon/keystone/tests/unit/test_ldap_livetest.py
+++ b/keystone-moon/keystone/tests/unit/test_ldap_livetest.py
@@ -15,9 +15,9 @@
import subprocess
import uuid
-import ldap
import ldap.modlist
from oslo_config import cfg
+from six.moves import range
from keystone import exception
from keystone.identity.backends import ldap as identity_ldap
@@ -81,12 +81,6 @@ class LiveLDAPIdentity(test_backend_ldap.LDAPIdentity):
config_files.append(tests.dirs.tests_conf('backend_liveldap.conf'))
return config_files
- def config_overrides(self):
- super(LiveLDAPIdentity, self).config_overrides()
- self.config_fixture.config(
- group='identity',
- driver='keystone.identity.backends.ldap.Identity')
-
def test_build_tree(self):
"""Regression test for building the tree names
"""
@@ -95,9 +89,6 @@ class LiveLDAPIdentity(test_backend_ldap.LDAPIdentity):
self.assertTrue(user_api)
self.assertEqual(user_api.tree_dn, CONF.ldap.user_tree_dn)
- def tearDown(self):
- tests.TestCase.tearDown(self)
-
def test_ldap_dereferencing(self):
alt_users_ldif = {'objectclass': ['top', 'organizationalUnit'],
'ou': 'alt_users'}
@@ -176,8 +167,10 @@ class LiveLDAPIdentity(test_backend_ldap.LDAPIdentity):
negative_user['id'])
self.assertEqual(0, len(group_refs))
- self.config_fixture.config(group='ldap', group_filter='(dn=xx)')
- self.reload_backends(CONF.identity.default_domain_id)
+ driver = self.identity_api._select_identity_driver(
+ CONF.identity.default_domain_id)
+ driver.group.ldap_filter = '(dn=xx)'
+
group_refs = self.identity_api.list_groups_for_user(
positive_user['id'])
self.assertEqual(0, len(group_refs))
@@ -185,9 +178,8 @@ class LiveLDAPIdentity(test_backend_ldap.LDAPIdentity):
negative_user['id'])
self.assertEqual(0, len(group_refs))
- self.config_fixture.config(group='ldap',
- group_filter='(objectclass=*)')
- self.reload_backends(CONF.identity.default_domain_id)
+ driver.group.ldap_filter = '(objectclass=*)'
+
group_refs = self.identity_api.list_groups_for_user(
positive_user['id'])
self.assertEqual(GROUP_COUNT, len(group_refs))
diff --git a/keystone-moon/keystone/tests/unit/test_ldap_pool_livetest.py b/keystone-moon/keystone/tests/unit/test_ldap_pool_livetest.py
index 02fa8145..a8776e5b 100644
--- a/keystone-moon/keystone/tests/unit/test_ldap_pool_livetest.py
+++ b/keystone-moon/keystone/tests/unit/test_ldap_pool_livetest.py
@@ -30,10 +30,10 @@ CONF = cfg.CONF
class LiveLDAPPoolIdentity(test_backend_ldap_pool.LdapPoolCommonTestMixin,
test_ldap_livetest.LiveLDAPIdentity):
- """Executes existing LDAP live test with pooled LDAP handler to make
- sure it works without any error.
+ """Executes existing LDAP live test with pooled LDAP handler.
Also executes common pool specific tests via Mixin class.
+
"""
def setUp(self):
@@ -48,12 +48,6 @@ class LiveLDAPPoolIdentity(test_backend_ldap_pool.LdapPoolCommonTestMixin,
tests_conf('backend_pool_liveldap.conf'))
return config_files
- def config_overrides(self):
- super(LiveLDAPPoolIdentity, self).config_overrides()
- self.config_fixture.config(
- group='identity',
- driver='keystone.identity.backends.ldap.Identity')
-
def test_assert_connector_used_not_fake_ldap_pool(self):
handler = ldap_core._get_connection(CONF.ldap.url, use_pool=True)
self.assertNotEqual(type(handler.Connector),
diff --git a/keystone-moon/keystone/tests/unit/test_ldap_tls_livetest.py b/keystone-moon/keystone/tests/unit/test_ldap_tls_livetest.py
index d79c2bad..e77bbc98 100644
--- a/keystone-moon/keystone/tests/unit/test_ldap_tls_livetest.py
+++ b/keystone-moon/keystone/tests/unit/test_ldap_tls_livetest.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import ldap
import ldap.modlist
from oslo_config import cfg
@@ -44,12 +43,6 @@ class LiveTLSLDAPIdentity(test_ldap_livetest.LiveLDAPIdentity):
config_files.append(tests.dirs.tests_conf('backend_tls_liveldap.conf'))
return config_files
- def config_overrides(self):
- super(LiveTLSLDAPIdentity, self).config_overrides()
- self.config_fixture.config(
- group='identity',
- driver='keystone.identity.backends.ldap.Identity')
-
def test_tls_certfile_demand_option(self):
self.config_fixture.config(group='ldap',
use_tls=True,
diff --git a/keystone-moon/keystone/tests/unit/test_policy.py b/keystone-moon/keystone/tests/unit/test_policy.py
index 2c0c3995..30df0b2b 100644
--- a/keystone-moon/keystone/tests/unit/test_policy.py
+++ b/keystone-moon/keystone/tests/unit/test_policy.py
@@ -14,6 +14,7 @@
# under the License.
import json
+import os
import mock
from oslo_policy import policy as common_policy
@@ -223,6 +224,48 @@ class PolicyJsonTestCase(tests.TestCase):
cloud_policy_keys = self._load_entries(
tests.dirs.etc('policy.v3cloudsample.json'))
- diffs = set(policy_keys).difference(set(cloud_policy_keys))
+ policy_extra_keys = ['admin_or_token_subject',
+ 'service_admin_or_token_subject',
+ 'token_subject', ]
+ expected_policy_keys = list(cloud_policy_keys) + policy_extra_keys
+ diffs = set(policy_keys).difference(set(expected_policy_keys))
self.assertThat(diffs, matchers.Equals(set()))
+
+ def test_all_targets_documented(self):
+ # All the targets in the sample policy file must be documented in
+ # doc/source/policy_mapping.rst.
+
+ policy_keys = self._load_entries(tests.dirs.etc('policy.json'))
+
+ # These keys are in the policy.json but aren't targets.
+ policy_rule_keys = [
+ 'admin_or_owner', 'admin_or_token_subject', 'admin_required',
+ 'default', 'owner', 'service_admin_or_token_subject',
+ 'service_or_admin', 'service_role', 'token_subject', ]
+
+ def read_doc_targets():
+ # Parse the doc/source/policy_mapping.rst file and return the
+ # targets.
+
+ doc_path = os.path.join(
+ tests.ROOTDIR, 'doc', 'source', 'policy_mapping.rst')
+ with open(doc_path) as doc_file:
+ for line in doc_file:
+ if line.startswith('Target'):
+ break
+ for line in doc_file:
+ # Skip === line
+ if line.startswith('==='):
+ break
+ for line in doc_file:
+ line = line.rstrip()
+ if not line or line.startswith(' '):
+ continue
+ if line.startswith('=='):
+ break
+ target, dummy, dummy = line.partition(' ')
+ yield six.text_type(target)
+
+ doc_targets = list(read_doc_targets())
+ self.assertItemsEqual(policy_keys, doc_targets + policy_rule_keys)
diff --git a/keystone-moon/keystone/tests/unit/test_revoke.py b/keystone-moon/keystone/tests/unit/test_revoke.py
index 727eff78..5394688c 100644
--- a/keystone-moon/keystone/tests/unit/test_revoke.py
+++ b/keystone-moon/keystone/tests/unit/test_revoke.py
@@ -16,8 +16,10 @@ import uuid
import mock
from oslo_utils import timeutils
+from six.moves import range
from testtools import matchers
+from keystone.common import utils
from keystone.contrib.revoke import model
from keystone import exception
from keystone.tests import unit as tests
@@ -112,6 +114,7 @@ def _matches(event, token_values):
class RevokeTests(object):
+
def test_list(self):
self.revoke_api.revoke_by_user(user_id=1)
self.assertEqual(1, len(self.revoke_api.list_events()))
@@ -140,8 +143,8 @@ class RevokeTests(object):
def test_expired_events_removed_validate_token_success(self, mock_utcnow):
def _sample_token_values():
token = _sample_blank_token()
- token['expires_at'] = timeutils.isotime(_future_time(),
- subsecond=True)
+ token['expires_at'] = utils.isotime(_future_time(),
+ subsecond=True)
return token
now = datetime.datetime.utcnow()
@@ -168,7 +171,7 @@ class RevokeTests(object):
def test_revoke_by_expiration_project_and_domain_fails(self):
user_id = _new_id()
- expires_at = timeutils.isotime(_future_time(), subsecond=True)
+ expires_at = utils.isotime(_future_time(), subsecond=True)
domain_id = _new_id()
project_id = _new_id()
self.assertThat(
@@ -181,24 +184,20 @@ class RevokeTests(object):
class SqlRevokeTests(test_backend_sql.SqlTests, RevokeTests):
def config_overrides(self):
super(SqlRevokeTests, self).config_overrides()
- self.config_fixture.config(
- group='revoke',
- driver='keystone.contrib.revoke.backends.sql.Revoke')
+ self.config_fixture.config(group='revoke', driver='sql')
self.config_fixture.config(
group='token',
- provider='keystone.token.providers.pki.Provider',
+ provider='pki',
revoke_by_id=False)
class KvsRevokeTests(tests.TestCase, RevokeTests):
def config_overrides(self):
super(KvsRevokeTests, self).config_overrides()
- self.config_fixture.config(
- group='revoke',
- driver='keystone.contrib.revoke.backends.kvs.Revoke')
+ self.config_fixture.config(group='revoke', driver='kvs')
self.config_fixture.config(
group='token',
- provider='keystone.token.providers.pki.Provider',
+ provider='pki',
revoke_by_id=False)
def setUp(self):
diff --git a/keystone-moon/keystone/tests/unit/test_sql_migrate_extensions.py b/keystone-moon/keystone/tests/unit/test_sql_migrate_extensions.py
index edfb91d7..87b3d48d 100644
--- a/keystone-moon/keystone/tests/unit/test_sql_migrate_extensions.py
+++ b/keystone-moon/keystone/tests/unit/test_sql_migrate_extensions.py
@@ -53,12 +53,6 @@ class SqlUpgradeExampleExtension(test_sql_upgrade.SqlMigrateBase):
self.upgrade(1, repository=self.repo_path)
self.assertTableColumns('example', ['id', 'type', 'extra'])
- def test_downgrade(self):
- self.upgrade(1, repository=self.repo_path)
- self.assertTableColumns('example', ['id', 'type', 'extra'])
- self.downgrade(0, repository=self.repo_path)
- self.assertTableDoesNotExist('example')
-
class SqlUpgradeOAuth1Extension(test_sql_upgrade.SqlMigrateBase):
def repo_package(self):
@@ -68,10 +62,6 @@ class SqlUpgradeOAuth1Extension(test_sql_upgrade.SqlMigrateBase):
super(SqlUpgradeOAuth1Extension, self).upgrade(
version, repository=self.repo_path)
- def downgrade(self, version):
- super(SqlUpgradeOAuth1Extension, self).downgrade(
- version, repository=self.repo_path)
-
def _assert_v1_3_tables(self):
self.assertTableColumns('consumer',
['id',
@@ -136,18 +126,6 @@ class SqlUpgradeOAuth1Extension(test_sql_upgrade.SqlMigrateBase):
self.upgrade(5)
self._assert_v4_later_tables()
- def test_downgrade(self):
- self.upgrade(5)
- self._assert_v4_later_tables()
- self.downgrade(3)
- self._assert_v1_3_tables()
- self.downgrade(1)
- self._assert_v1_3_tables()
- self.downgrade(0)
- self.assertTableDoesNotExist('consumer')
- self.assertTableDoesNotExist('request_token')
- self.assertTableDoesNotExist('access_token')
-
class EndpointFilterExtension(test_sql_upgrade.SqlMigrateBase):
def repo_package(self):
@@ -157,10 +135,6 @@ class EndpointFilterExtension(test_sql_upgrade.SqlMigrateBase):
super(EndpointFilterExtension, self).upgrade(
version, repository=self.repo_path)
- def downgrade(self, version):
- super(EndpointFilterExtension, self).downgrade(
- version, repository=self.repo_path)
-
def _assert_v1_tables(self):
self.assertTableColumns('project_endpoint',
['endpoint_id', 'project_id'])
@@ -184,14 +158,6 @@ class EndpointFilterExtension(test_sql_upgrade.SqlMigrateBase):
self.upgrade(2)
self._assert_v2_tables()
- def test_downgrade(self):
- self.upgrade(2)
- self._assert_v2_tables()
- self.downgrade(1)
- self._assert_v1_tables()
- self.downgrade(0)
- self.assertTableDoesNotExist('project_endpoint')
-
class EndpointPolicyExtension(test_sql_upgrade.SqlMigrateBase):
def repo_package(self):
@@ -204,14 +170,6 @@ class EndpointPolicyExtension(test_sql_upgrade.SqlMigrateBase):
['id', 'policy_id', 'endpoint_id',
'service_id', 'region_id'])
- def test_downgrade(self):
- self.upgrade(1, repository=self.repo_path)
- self.assertTableColumns('policy_association',
- ['id', 'policy_id', 'endpoint_id',
- 'service_id', 'region_id'])
- self.downgrade(0, repository=self.repo_path)
- self.assertTableDoesNotExist('policy_association')
-
class FederationExtension(test_sql_upgrade.SqlMigrateBase):
"""Test class for ensuring the Federation SQL."""
@@ -264,27 +222,7 @@ class FederationExtension(test_sql_upgrade.SqlMigrateBase):
'federation_protocol')
self.assertFalse(federation_protocol.c.mapping_id.nullable)
- def test_downgrade(self):
- self.upgrade(3, repository=self.repo_path)
- self.assertTableColumns(self.identity_provider,
- ['id', 'enabled', 'description'])
- self.assertTableColumns(self.federation_protocol,
- ['id', 'idp_id', 'mapping_id'])
- self.assertTableColumns(self.mapping,
- ['id', 'rules'])
-
- self.downgrade(2, repository=self.repo_path)
- federation_protocol = utils.get_table(
- self.engine,
- 'federation_protocol')
- self.assertTrue(federation_protocol.c.mapping_id.nullable)
-
- self.downgrade(0, repository=self.repo_path)
- self.assertTableDoesNotExist(self.identity_provider)
- self.assertTableDoesNotExist(self.federation_protocol)
- self.assertTableDoesNotExist(self.mapping)
-
- def test_fixup_service_provider_attributes(self):
+ def test_service_provider_attributes_cannot_be_null(self):
self.upgrade(6, repository=self.repo_path)
self.assertTableColumns(self.service_provider,
['id', 'description', 'enabled', 'auth_url',
@@ -325,12 +263,28 @@ class FederationExtension(test_sql_upgrade.SqlMigrateBase):
sp3)
session.close()
- self.downgrade(5, repository=self.repo_path)
+
+ def test_fixup_service_provider_attributes(self):
+ session = self.Session()
+ sp1 = {'id': uuid.uuid4().hex,
+ 'auth_url': None,
+ 'sp_url': uuid.uuid4().hex,
+ 'description': uuid.uuid4().hex,
+ 'enabled': True}
+ sp2 = {'id': uuid.uuid4().hex,
+ 'auth_url': uuid.uuid4().hex,
+ 'sp_url': None,
+ 'description': uuid.uuid4().hex,
+ 'enabled': True}
+ sp3 = {'id': uuid.uuid4().hex,
+ 'auth_url': None,
+ 'sp_url': None,
+ 'description': uuid.uuid4().hex,
+ 'enabled': True}
+ self.upgrade(5, repository=self.repo_path)
self.assertTableColumns(self.service_provider,
['id', 'description', 'enabled', 'auth_url',
'sp_url'])
- session = self.Session()
- self.metadata.clear()
# Before the migration, the table should accept null values
self.insert_dict(session, self.service_provider, sp1)
@@ -356,13 +310,20 @@ class FederationExtension(test_sql_upgrade.SqlMigrateBase):
self.assertEqual('', sp.auth_url)
self.assertEqual('', sp.sp_url)
-_REVOKE_COLUMN_NAMES = ['id', 'domain_id', 'project_id', 'user_id', 'role_id',
- 'trust_id', 'consumer_id', 'access_token_id',
- 'issued_before', 'expires_at', 'revoked_at']
+ def test_add_relay_state_column(self):
+ self.upgrade(8, repository=self.repo_path)
+ self.assertTableColumns(self.service_provider,
+ ['id', 'description', 'enabled', 'auth_url',
+ 'relay_state_prefix', 'sp_url'])
class RevokeExtension(test_sql_upgrade.SqlMigrateBase):
+ _REVOKE_COLUMN_NAMES = ['id', 'domain_id', 'project_id', 'user_id',
+ 'role_id', 'trust_id', 'consumer_id',
+ 'access_token_id', 'issued_before', 'expires_at',
+ 'revoked_at']
+
def repo_package(self):
return revoke
@@ -370,11 +331,4 @@ class RevokeExtension(test_sql_upgrade.SqlMigrateBase):
self.assertTableDoesNotExist('revocation_event')
self.upgrade(1, repository=self.repo_path)
self.assertTableColumns('revocation_event',
- _REVOKE_COLUMN_NAMES)
-
- def test_downgrade(self):
- self.upgrade(1, repository=self.repo_path)
- self.assertTableColumns('revocation_event',
- _REVOKE_COLUMN_NAMES)
- self.downgrade(0, repository=self.repo_path)
- self.assertTableDoesNotExist('revocation_event')
+ self._REVOKE_COLUMN_NAMES)
diff --git a/keystone-moon/keystone/tests/unit/test_sql_upgrade.py b/keystone-moon/keystone/tests/unit/test_sql_upgrade.py
index e50bad56..96dfa9e8 100644
--- a/keystone-moon/keystone/tests/unit/test_sql_upgrade.py
+++ b/keystone-moon/keystone/tests/unit/test_sql_upgrade.py
@@ -38,7 +38,6 @@ from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_db.sqlalchemy import migration
from oslo_db.sqlalchemy import session as db_session
-import six
from sqlalchemy.engine import reflection
import sqlalchemy.exc
from sqlalchemy import schema
@@ -158,6 +157,7 @@ class SqlMigrateBase(tests.SQLDriverOverrides, tests.TestCase):
# create and share a single sqlalchemy engine for testing
self.engine = sql.get_engine()
self.Session = db_session.get_maker(self.engine, autocommit=False)
+ self.addCleanup(sqlalchemy.orm.session.Session.close_all)
self.initialize_sql()
self.repo_path = migration_helpers.find_migrate_repo(
@@ -169,8 +169,12 @@ class SqlMigrateBase(tests.SQLDriverOverrides, tests.TestCase):
# auto-detect the highest available schema version in the migrate_repo
self.max_version = self.schema.repository.version().version
- def tearDown(self):
- sqlalchemy.orm.session.Session.close_all()
+ self.addCleanup(sql.cleanup)
+
+ # drop tables and FKs.
+ self.addCleanup(self._cleanupDB)
+
+ def _cleanupDB(self):
meta = sqlalchemy.MetaData()
meta.bind = self.engine
meta.reflect(self.engine)
@@ -193,14 +197,12 @@ class SqlMigrateBase(tests.SQLDriverOverrides, tests.TestCase):
all_fks.extend(fks)
for fkc in all_fks:
- conn.execute(schema.DropConstraint(fkc))
+ if self.engine.name != 'sqlite':
+ conn.execute(schema.DropConstraint(fkc))
for table in tbs:
conn.execute(schema.DropTable(table))
- sql.cleanup()
- super(SqlMigrateBase, self).tearDown()
-
def select_table(self, name):
table = sqlalchemy.Table(name,
self.metadata,
@@ -230,9 +232,6 @@ class SqlMigrateBase(tests.SQLDriverOverrides, tests.TestCase):
def upgrade(self, *args, **kwargs):
self._migrate(*args, **kwargs)
- def downgrade(self, *args, **kwargs):
- self._migrate(*args, downgrade=True, **kwargs)
-
def _migrate(self, version, repository=None, downgrade=False,
current_schema=None):
repository = repository or self.repo_path
@@ -278,42 +277,6 @@ class SqlUpgradeTests(SqlMigrateBase):
version,
'DB is not at version %s' % migrate_repo.DB_INIT_VERSION)
- def test_two_steps_forward_one_step_back(self):
- """You should be able to cleanly undo and re-apply all upgrades.
-
- Upgrades are run in the following order::
-
- Starting with the initial version defined at
- keystone.common.migrate_repo.DB_INIT_VERSION
-
- INIT +1 -> INIT +2 -> INIT +1 -> INIT +2 -> INIT +3 -> INIT +2 ...
- ^---------------------^ ^---------------------^
-
- Downgrade to the DB_INIT_VERSION does not occur based on the
- requirement that the base version be DB_INIT_VERSION + 1 before
- migration can occur. Downgrade below DB_INIT_VERSION + 1 is no longer
- supported.
-
- DB_INIT_VERSION is the number preceding the release schema version from
- two releases prior. Example, Juno releases with the DB_INIT_VERSION
- being 35 where Havana (Havana was two releases before Juno) release
- schema version is 36.
-
- The migrate utility requires the db must be initialized under version
- control with the revision directly before the first version to be
- applied.
-
- """
- for x in range(migrate_repo.DB_INIT_VERSION + 1,
- self.max_version + 1):
- self.upgrade(x)
- downgrade_ver = x - 1
- # Don't actually downgrade to the init version. This will raise
- # a not-implemented error.
- if downgrade_ver != migrate_repo.DB_INIT_VERSION:
- self.downgrade(x - 1)
- self.upgrade(x)
-
def test_upgrade_add_initial_tables(self):
self.upgrade(migrate_repo.DB_INIT_VERSION + 1)
self.check_initial_table_structure()
@@ -338,32 +301,6 @@ class SqlUpgradeTests(SqlMigrateBase):
for k in default_domain.keys():
self.assertEqual(default_domain[k], getattr(refs[0], k))
- def test_downgrade_to_db_init_version(self):
- self.upgrade(self.max_version)
-
- if self.engine.name == 'mysql':
- self._mysql_check_all_tables_innodb()
-
- self.downgrade(migrate_repo.DB_INIT_VERSION + 1)
- self.check_initial_table_structure()
-
- meta = sqlalchemy.MetaData()
- meta.bind = self.engine
- meta.reflect(self.engine)
-
- initial_table_set = set(INITIAL_TABLE_STRUCTURE.keys())
- table_set = set(meta.tables.keys())
- # explicitly remove the migrate_version table, this is not controlled
- # by the migration scripts and should be exempt from this check.
- table_set.remove('migrate_version')
-
- self.assertSetEqual(initial_table_set, table_set)
- # Downgrade to before Icehouse's release schema version (044) is not
- # supported. A NotImplementedError should be raised when attempting to
- # downgrade.
- self.assertRaises(NotImplementedError, self.downgrade,
- migrate_repo.DB_INIT_VERSION)
-
def insert_dict(self, session, table_name, d, table=None):
"""Naively inserts key-value pairs into a table, given a dictionary."""
if table is None:
@@ -380,8 +317,6 @@ class SqlUpgradeTests(SqlMigrateBase):
self.assertTableDoesNotExist('id_mapping')
self.upgrade(51)
self.assertTableExists('id_mapping')
- self.downgrade(50)
- self.assertTableDoesNotExist('id_mapping')
def test_region_url_upgrade(self):
self.upgrade(52)
@@ -389,42 +324,6 @@ class SqlUpgradeTests(SqlMigrateBase):
['id', 'description', 'parent_region_id',
'extra', 'url'])
- def test_region_url_downgrade(self):
- self.upgrade(52)
- self.downgrade(51)
- self.assertTableColumns('region',
- ['id', 'description', 'parent_region_id',
- 'extra'])
-
- def test_region_url_cleanup(self):
- # make sure that the url field is dropped in the downgrade
- self.upgrade(52)
- session = self.Session()
- beta = {
- 'id': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- 'parent_region_id': uuid.uuid4().hex,
- 'url': uuid.uuid4().hex
- }
- acme = {
- 'id': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- 'parent_region_id': uuid.uuid4().hex,
- 'url': None
- }
- self.insert_dict(session, 'region', beta)
- self.insert_dict(session, 'region', acme)
- region_table = sqlalchemy.Table('region', self.metadata, autoload=True)
- self.assertEqual(2, session.query(region_table).count())
- session.close()
- self.downgrade(51)
- session = self.Session()
- self.metadata.clear()
- region_table = sqlalchemy.Table('region', self.metadata, autoload=True)
- self.assertEqual(2, session.query(region_table).count())
- region = session.query(region_table)[0]
- self.assertRaises(AttributeError, getattr, region, 'url')
-
def test_endpoint_region_upgrade_columns(self):
self.upgrade(53)
self.assertTableColumns('endpoint',
@@ -439,21 +338,6 @@ class SqlUpgradeTests(SqlMigrateBase):
autoload=True)
self.assertEqual(255, endpoint_table.c.region_id.type.length)
- def test_endpoint_region_downgrade_columns(self):
- self.upgrade(53)
- self.downgrade(52)
- self.assertTableColumns('endpoint',
- ['id', 'legacy_endpoint_id', 'interface',
- 'service_id', 'url', 'extra', 'enabled',
- 'region'])
- region_table = sqlalchemy.Table('region', self.metadata, autoload=True)
- self.assertEqual(64, region_table.c.id.type.length)
- self.assertEqual(64, region_table.c.parent_region_id.type.length)
- endpoint_table = sqlalchemy.Table('endpoint',
- self.metadata,
- autoload=True)
- self.assertEqual(255, endpoint_table.c.region.type.length)
-
def test_endpoint_region_migration(self):
self.upgrade(52)
session = self.Session()
@@ -519,106 +403,29 @@ class SqlUpgradeTests(SqlMigrateBase):
self.assertEqual(1, session.query(endpoint_table).
filter_by(region_id=_small_region_name).count())
- # downgrade to 52
- session.close()
- self.downgrade(52)
- session = self.Session()
- self.metadata.clear()
-
- region_table = sqlalchemy.Table('region', self.metadata, autoload=True)
- self.assertEqual(1, session.query(region_table).count())
- self.assertEqual(1, session.query(region_table).
- filter_by(id=_small_region_name).count())
-
- endpoint_table = sqlalchemy.Table('endpoint',
- self.metadata,
- autoload=True)
- self.assertEqual(5, session.query(endpoint_table).count())
- self.assertEqual(2, session.query(endpoint_table).
- filter_by(region=_long_region_name).count())
- self.assertEqual(1, session.query(endpoint_table).
- filter_by(region=_clashing_region_name).count())
- self.assertEqual(1, session.query(endpoint_table).
- filter_by(region=_small_region_name).count())
-
def test_add_actor_id_index(self):
self.upgrade(53)
self.upgrade(54)
table = sqlalchemy.Table('assignment', self.metadata, autoload=True)
- index_data = [(idx.name, idx.columns.keys()) for idx in table.indexes]
+ index_data = [(idx.name, list(idx.columns.keys()))
+ for idx in table.indexes]
self.assertIn(('ix_actor_id', ['actor_id']), index_data)
def test_token_user_id_and_trust_id_index_upgrade(self):
self.upgrade(54)
self.upgrade(55)
table = sqlalchemy.Table('token', self.metadata, autoload=True)
- index_data = [(idx.name, idx.columns.keys()) for idx in table.indexes]
+ index_data = [(idx.name, list(idx.columns.keys()))
+ for idx in table.indexes]
self.assertIn(('ix_token_user_id', ['user_id']), index_data)
self.assertIn(('ix_token_trust_id', ['trust_id']), index_data)
- def test_token_user_id_and_trust_id_index_downgrade(self):
- self.upgrade(55)
- self.downgrade(54)
- table = sqlalchemy.Table('token', self.metadata, autoload=True)
- index_data = [(idx.name, idx.columns.keys()) for idx in table.indexes]
- self.assertNotIn(('ix_token_user_id', ['user_id']), index_data)
- self.assertNotIn(('ix_token_trust_id', ['trust_id']), index_data)
-
- def test_remove_actor_id_index(self):
- self.upgrade(54)
- self.downgrade(53)
- table = sqlalchemy.Table('assignment', self.metadata, autoload=True)
- index_data = [(idx.name, idx.columns.keys()) for idx in table.indexes]
- self.assertNotIn(('ix_actor_id', ['actor_id']), index_data)
-
def test_project_parent_id_upgrade(self):
self.upgrade(61)
self.assertTableColumns('project',
['id', 'name', 'extra', 'description',
'enabled', 'domain_id', 'parent_id'])
- def test_project_parent_id_downgrade(self):
- self.upgrade(61)
- self.downgrade(60)
- self.assertTableColumns('project',
- ['id', 'name', 'extra', 'description',
- 'enabled', 'domain_id'])
-
- def test_project_parent_id_cleanup(self):
- # make sure that the parent_id field is dropped in the downgrade
- self.upgrade(61)
- session = self.Session()
- domain = {'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'enabled': True}
- acme = {
- 'id': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- 'domain_id': domain['id'],
- 'name': uuid.uuid4().hex,
- 'parent_id': None
- }
- beta = {
- 'id': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- 'domain_id': domain['id'],
- 'name': uuid.uuid4().hex,
- 'parent_id': acme['id']
- }
- self.insert_dict(session, 'domain', domain)
- self.insert_dict(session, 'project', acme)
- self.insert_dict(session, 'project', beta)
- proj_table = sqlalchemy.Table('project', self.metadata, autoload=True)
- self.assertEqual(2, session.query(proj_table).count())
- session.close()
- self.downgrade(60)
- session = self.Session()
- self.metadata.clear()
- proj_table = sqlalchemy.Table('project', self.metadata, autoload=True)
- self.assertEqual(2, session.query(proj_table).count())
- project = session.query(proj_table)[0]
- self.assertRaises(AttributeError, getattr, project, 'parent_id')
-
def test_drop_assignment_role_fk(self):
self.upgrade(61)
self.assertTrue(self.does_fk_exist('assignment', 'role_id'))
@@ -626,8 +433,80 @@ class SqlUpgradeTests(SqlMigrateBase):
if self.engine.name != 'sqlite':
# sqlite does not support FK deletions (or enforcement)
self.assertFalse(self.does_fk_exist('assignment', 'role_id'))
- self.downgrade(61)
- self.assertTrue(self.does_fk_exist('assignment', 'role_id'))
+
+ def test_insert_assignment_inherited_pk(self):
+ ASSIGNMENT_TABLE_NAME = 'assignment'
+ INHERITED_COLUMN_NAME = 'inherited'
+ ROLE_TABLE_NAME = 'role'
+
+ self.upgrade(72)
+
+ # Check that the 'inherited' column is not part of the PK
+ self.assertFalse(self.does_pk_exist(ASSIGNMENT_TABLE_NAME,
+ INHERITED_COLUMN_NAME))
+
+ session = self.Session()
+
+ role = {'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex}
+ self.insert_dict(session, ROLE_TABLE_NAME, role)
+
+ # Create both inherited and noninherited role assignments
+ inherited = {'type': 'UserProject',
+ 'actor_id': uuid.uuid4().hex,
+ 'target_id': uuid.uuid4().hex,
+ 'role_id': role['id'],
+ 'inherited': True}
+
+ noninherited = inherited.copy()
+ noninherited['inherited'] = False
+
+ # Create another inherited role assignment as a spoiler
+ spoiler = inherited.copy()
+ spoiler['actor_id'] = uuid.uuid4().hex
+
+ self.insert_dict(session, ASSIGNMENT_TABLE_NAME, inherited)
+ self.insert_dict(session, ASSIGNMENT_TABLE_NAME, spoiler)
+
+ # Since 'inherited' is not part of the PK, we can't insert noninherited
+ self.assertRaises(db_exception.DBDuplicateEntry,
+ self.insert_dict,
+ session,
+ ASSIGNMENT_TABLE_NAME,
+ noninherited)
+
+ session.close()
+
+ self.upgrade(73)
+
+ session = self.Session()
+ self.metadata.clear()
+
+ # Check that the 'inherited' column is now part of the PK
+ self.assertTrue(self.does_pk_exist(ASSIGNMENT_TABLE_NAME,
+ INHERITED_COLUMN_NAME))
+
+ # The noninherited role assignment can now be inserted
+ self.insert_dict(session, ASSIGNMENT_TABLE_NAME, noninherited)
+
+ assignment_table = sqlalchemy.Table(ASSIGNMENT_TABLE_NAME,
+ self.metadata,
+ autoload=True)
+
+ assignments = session.query(assignment_table).all()
+ for assignment in (inherited, spoiler, noninherited):
+ self.assertIn((assignment['type'], assignment['actor_id'],
+ assignment['target_id'], assignment['role_id'],
+ assignment['inherited']),
+ assignments)
+
+ def does_pk_exist(self, table, pk_column):
+ """Checks whether a column is primary key on a table."""
+
+ inspector = reflection.Inspector.from_engine(self.engine)
+ pk_columns = inspector.get_pk_constraint(table)['constrained_columns']
+
+ return pk_column in pk_columns
def does_fk_exist(self, table, fk_column):
inspector = reflection.Inspector.from_engine(self.engine)
@@ -642,14 +521,7 @@ class SqlUpgradeTests(SqlMigrateBase):
['id', 'description', 'parent_region_id',
'extra'])
- def test_drop_region_url_downgrade(self):
- self.upgrade(63)
- self.downgrade(62)
- self.assertTableColumns('region',
- ['id', 'description', 'parent_region_id',
- 'extra', 'url'])
-
- def test_drop_domain_fk(self):
+ def test_domain_fk(self):
self.upgrade(63)
self.assertTrue(self.does_fk_exist('group', 'domain_id'))
self.assertTrue(self.does_fk_exist('user', 'domain_id'))
@@ -658,9 +530,6 @@ class SqlUpgradeTests(SqlMigrateBase):
# sqlite does not support FK deletions (or enforcement)
self.assertFalse(self.does_fk_exist('group', 'domain_id'))
self.assertFalse(self.does_fk_exist('user', 'domain_id'))
- self.downgrade(63)
- self.assertTrue(self.does_fk_exist('group', 'domain_id'))
- self.assertTrue(self.does_fk_exist('user', 'domain_id'))
def test_add_domain_config(self):
whitelisted_table = 'whitelisted_config'
@@ -673,9 +542,6 @@ class SqlUpgradeTests(SqlMigrateBase):
['domain_id', 'group', 'option', 'value'])
self.assertTableColumns(sensitive_table,
['domain_id', 'group', 'option', 'value'])
- self.downgrade(64)
- self.assertTableDoesNotExist(whitelisted_table)
- self.assertTableDoesNotExist(sensitive_table)
def test_fixup_service_name_value_upgrade(self):
"""Update service name data from `extra` to empty string."""
@@ -724,6 +590,10 @@ class SqlUpgradeTests(SqlMigrateBase):
random_attr_name_empty, random_attr_name_none_str),
]
+ # NOTE(viktors): Add a service with empty extra field
+ self.insert_dict(session, 'service',
+ {'id': uuid.uuid4().hex, 'type': uuid.uuid4().hex})
+
session.close()
self.upgrade(66)
session = self.Session()
@@ -744,6 +614,28 @@ class SqlUpgradeTests(SqlMigrateBase):
extra = fetch_service_extra(service_id)
self.assertDictEqual(exp_extra, extra, msg)
+ def _does_index_exist(self, table_name, index_name):
+ meta = sqlalchemy.MetaData(bind=self.engine)
+ table = sqlalchemy.Table(table_name, meta, autoload=True)
+ return index_name in [idx.name for idx in table.indexes]
+
+ def test_drop_assignment_role_id_index_mysql(self):
+ self.upgrade(66)
+ if self.engine.name == "mysql":
+ self.assertTrue(self._does_index_exist('assignment',
+ 'assignment_role_id_fkey'))
+ self.upgrade(67)
+ if self.engine.name == "mysql":
+ self.assertFalse(self._does_index_exist('assignment',
+ 'assignment_role_id_fkey'))
+
+ def test_project_is_domain_upgrade(self):
+ self.upgrade(74)
+ self.assertTableColumns('project',
+ ['id', 'name', 'extra', 'description',
+ 'enabled', 'domain_id', 'parent_id',
+ 'is_domain'])
+
def populate_user_table(self, with_pass_enab=False,
with_pass_enab_domain=False):
# Populate the appropriate fields in the user
@@ -881,6 +773,13 @@ class VersionTests(SqlMigrateBase):
version = migration_helpers.get_db_version()
self.assertEqual(self.max_version, version)
+ def test_assert_not_schema_downgrade(self):
+ self.upgrade(self.max_version)
+ self.assertRaises(
+ db_exception.DbMigrationError,
+ migration_helpers._sync_common_repo,
+ self.max_version - 1)
+
def test_extension_not_controlled(self):
"""When get the version before controlling, raises DbMigrationError."""
self.assertRaises(db_exception.DbMigrationError,
@@ -889,7 +788,7 @@ class VersionTests(SqlMigrateBase):
def test_extension_initial(self):
"""When get the initial version of an extension, it's 0."""
- for name, extension in six.iteritems(EXTENSIONS):
+ for name, extension in EXTENSIONS.items():
abs_path = migration_helpers.find_migrate_repo(extension)
migration.db_version_control(sql.get_engine(), abs_path)
version = migration_helpers.get_db_version(extension=name)
@@ -898,18 +797,7 @@ class VersionTests(SqlMigrateBase):
def test_extension_migrated(self):
"""When get the version after migrating an extension, it's not 0."""
- for name, extension in six.iteritems(EXTENSIONS):
- abs_path = migration_helpers.find_migrate_repo(extension)
- migration.db_version_control(sql.get_engine(), abs_path)
- migration.db_sync(sql.get_engine(), abs_path)
- version = migration_helpers.get_db_version(extension=name)
- self.assertTrue(
- version > 0,
- "Version for %s didn't change after migrated?" % name)
-
- def test_extension_downgraded(self):
- """When get the version after downgrading an extension, it is 0."""
- for name, extension in six.iteritems(EXTENSIONS):
+ for name, extension in EXTENSIONS.items():
abs_path = migration_helpers.find_migrate_repo(extension)
migration.db_version_control(sql.get_engine(), abs_path)
migration.db_sync(sql.get_engine(), abs_path)
@@ -917,10 +805,47 @@ class VersionTests(SqlMigrateBase):
self.assertTrue(
version > 0,
"Version for %s didn't change after migrated?" % name)
- migration.db_sync(sql.get_engine(), abs_path, version=0)
- version = migration_helpers.get_db_version(extension=name)
- self.assertEqual(0, version,
- 'Migrate version for %s is not 0' % name)
+ # Verify downgrades cannot occur
+ self.assertRaises(
+ db_exception.DbMigrationError,
+ migration_helpers._sync_extension_repo,
+ extension=name,
+ version=0)
+
+ def test_extension_federation_upgraded_values(self):
+ abs_path = migration_helpers.find_migrate_repo(federation)
+ migration.db_version_control(sql.get_engine(), abs_path)
+ migration.db_sync(sql.get_engine(), abs_path, version=6)
+ idp_table = sqlalchemy.Table("identity_provider",
+ self.metadata,
+ autoload=True)
+ idps = [{'id': uuid.uuid4().hex,
+ 'enabled': True,
+ 'description': uuid.uuid4().hex,
+ 'remote_id': uuid.uuid4().hex},
+ {'id': uuid.uuid4().hex,
+ 'enabled': True,
+ 'description': uuid.uuid4().hex,
+ 'remote_id': uuid.uuid4().hex}]
+ for idp in idps:
+ ins = idp_table.insert().values({'id': idp['id'],
+ 'enabled': idp['enabled'],
+ 'description': idp['description'],
+ 'remote_id': idp['remote_id']})
+ self.engine.execute(ins)
+ migration.db_sync(sql.get_engine(), abs_path)
+ idp_remote_ids_table = sqlalchemy.Table("idp_remote_ids",
+ self.metadata,
+ autoload=True)
+ for idp in idps:
+ s = idp_remote_ids_table.select().where(
+ idp_remote_ids_table.c.idp_id == idp['id'])
+ remote = self.engine.execute(s).fetchone()
+ self.assertEqual(idp['remote_id'],
+ remote['remote_id'],
+ 'remote_ids must be preserved during the '
+ 'migration from identity_provider table to '
+ 'idp_remote_ids table')
def test_unexpected_extension(self):
"""The version for an extension that doesn't exist raises ImportError.
diff --git a/keystone-moon/keystone/tests/unit/test_ssl.py b/keystone-moon/keystone/tests/unit/test_ssl.py
index c5f443b0..3b86bb2d 100644
--- a/keystone-moon/keystone/tests/unit/test_ssl.py
+++ b/keystone-moon/keystone/tests/unit/test_ssl.py
@@ -36,6 +36,16 @@ CLIENT = os.path.join(CERTDIR, 'middleware.pem')
class SSLTestCase(tests.TestCase):
def setUp(self):
super(SSLTestCase, self).setUp()
+ raise self.skipTest('SSL Version and Ciphers cannot be configured '
+ 'with eventlet, some platforms have disabled '
+ 'SSLv3. See bug 1381365.')
+ # NOTE(morganfainberg): It has been determined that this
+ # will not be fixed. These tests should be re-enabled for the full
+ # functional test suite when run against an SSL terminated
+ # endpoint. Some distributions/environments have patched OpenSSL to
+ # not have SSLv3 at all due to POODLE and this causes differing
+ # behavior depending on platform. See bug 1381365 for more information.
+
# NOTE(jamespage):
# Deal with more secure certificate chain verification
# introduced in python 2.7.9 under PEP-0476
diff --git a/keystone-moon/keystone/tests/unit/test_token_provider.py b/keystone-moon/keystone/tests/unit/test_token_provider.py
index dc08664f..3ebb0187 100644
--- a/keystone-moon/keystone/tests/unit/test_token_provider.py
+++ b/keystone-moon/keystone/tests/unit/test_token_provider.py
@@ -18,11 +18,14 @@ from oslo_config import cfg
from oslo_utils import timeutils
from keystone.common import dependency
+from keystone.common import utils
from keystone import exception
from keystone.tests import unit as tests
from keystone.tests.unit.ksfixtures import database
from keystone import token
+from keystone.token.providers import fernet
from keystone.token.providers import pki
+from keystone.token.providers import pkiz
from keystone.token.providers import uuid
@@ -655,8 +658,8 @@ def create_v2_token():
return {
"access": {
"token": {
- "expires": timeutils.isotime(timeutils.utcnow() +
- FUTURE_DELTA),
+ "expires": utils.isotime(timeutils.utcnow() +
+ FUTURE_DELTA),
"issued_at": "2013-05-21T00:02:43.941473Z",
"tenant": {
"enabled": True,
@@ -671,7 +674,7 @@ def create_v2_token():
SAMPLE_V2_TOKEN_EXPIRED = {
"access": {
"token": {
- "expires": timeutils.isotime(CURRENT_DATE),
+ "expires": utils.isotime(CURRENT_DATE),
"issued_at": "2013-05-21T00:02:43.941473Z",
"tenant": {
"enabled": True,
@@ -687,7 +690,7 @@ def create_v3_token():
return {
"token": {
'methods': [],
- "expires_at": timeutils.isotime(timeutils.utcnow() + FUTURE_DELTA),
+ "expires_at": utils.isotime(timeutils.utcnow() + FUTURE_DELTA),
"issued_at": "2013-05-21T00:02:43.941473Z",
}
}
@@ -695,7 +698,7 @@ def create_v3_token():
SAMPLE_V3_TOKEN_EXPIRED = {
"token": {
- "expires_at": timeutils.isotime(CURRENT_DATE),
+ "expires_at": utils.isotime(CURRENT_DATE),
"issued_at": "2013-05-21T00:02:43.941473Z",
}
}
@@ -742,22 +745,20 @@ class TestTokenProvider(tests.TestCase):
uuid.Provider)
dependency.reset()
- self.config_fixture.config(
- group='token',
- provider='keystone.token.providers.uuid.Provider')
- token.provider.Manager()
+ self.config_fixture.config(group='token', provider='uuid')
+ self.assertIsInstance(token.provider.Manager().driver, uuid.Provider)
dependency.reset()
- self.config_fixture.config(
- group='token',
- provider='keystone.token.providers.pki.Provider')
- token.provider.Manager()
+ self.config_fixture.config(group='token', provider='pki')
+ self.assertIsInstance(token.provider.Manager().driver, pki.Provider)
dependency.reset()
- self.config_fixture.config(
- group='token',
- provider='keystone.token.providers.pkiz.Provider')
- token.provider.Manager()
+ self.config_fixture.config(group='token', provider='pkiz')
+ self.assertIsInstance(token.provider.Manager().driver, pkiz.Provider)
+
+ dependency.reset()
+ self.config_fixture.config(group='token', provider='fernet')
+ self.assertIsInstance(token.provider.Manager().driver, fernet.Provider)
def test_unsupported_token_provider(self):
self.config_fixture.config(group='token',
diff --git a/keystone-moon/keystone/tests/unit/test_v2.py b/keystone-moon/keystone/tests/unit/test_v2.py
index 8c7c3792..415150cf 100644
--- a/keystone-moon/keystone/tests/unit/test_v2.py
+++ b/keystone-moon/keystone/tests/unit/test_v2.py
@@ -56,6 +56,8 @@ class CoreApiTests(object):
def assertValidTenant(self, tenant):
self.assertIsNotNone(tenant.get('id'))
self.assertIsNotNone(tenant.get('name'))
+ self.assertNotIn('domain_id', tenant)
+ self.assertNotIn('parent_id', tenant)
def assertValidUser(self, user):
self.assertIsNotNone(user.get('id'))
@@ -1373,12 +1375,10 @@ class V2TestCase(RestfulTestCase, CoreApiTests, LegacyV2UsernameTests):
class RevokeApiTestCase(V2TestCase):
def config_overrides(self):
super(RevokeApiTestCase, self).config_overrides()
- self.config_fixture.config(
- group='revoke',
- driver='keystone.contrib.revoke.backends.kvs.Revoke')
+ self.config_fixture.config(group='revoke', driver='kvs')
self.config_fixture.config(
group='token',
- provider='keystone.token.providers.pki.Provider',
+ provider='pki',
revoke_by_id=False)
def test_fetch_revocation_list_admin_200(self):
@@ -1410,9 +1410,7 @@ class TestFernetTokenProviderV2(RestfulTestCase):
def config_overrides(self):
super(TestFernetTokenProviderV2, self).config_overrides()
- self.config_fixture.config(
- group='token',
- provider='keystone.token.providers.fernet.Provider')
+ self.config_fixture.config(group='token', provider='fernet')
def test_authenticate_unscoped_token(self):
unscoped_token = self.get_unscoped_token()
@@ -1498,3 +1496,44 @@ class TestFernetTokenProviderV2(RestfulTestCase):
path=path,
token=CONF.admin_token,
expected_status=200)
+
+ def test_rescoped_tokens_maintain_original_expiration(self):
+ project_ref = self.new_project_ref()
+ self.resource_api.create_project(project_ref['id'], project_ref)
+ self.assignment_api.add_role_to_user_and_project(self.user_foo['id'],
+ project_ref['id'],
+ self.role_admin['id'])
+ resp = self.public_request(
+ method='POST',
+ path='/v2.0/tokens',
+ body={
+ 'auth': {
+ 'tenantName': project_ref['name'],
+ 'passwordCredentials': {
+ 'username': self.user_foo['name'],
+ 'password': self.user_foo['password']
+ }
+ }
+ },
+ # NOTE(lbragstad): This test may need to be refactored if Keystone
+ # decides to disallow rescoping using a scoped token.
+ expected_status=200)
+ original_token = resp.result['access']['token']['id']
+ original_expiration = resp.result['access']['token']['expires']
+
+ resp = self.public_request(
+ method='POST',
+ path='/v2.0/tokens',
+ body={
+ 'auth': {
+ 'tenantName': project_ref['name'],
+ 'token': {
+ 'id': original_token,
+ }
+ }
+ },
+ expected_status=200)
+ rescoped_token = resp.result['access']['token']['id']
+ rescoped_expiration = resp.result['access']['token']['expires']
+ self.assertNotEqual(original_token, rescoped_token)
+ self.assertEqual(original_expiration, rescoped_expiration)
diff --git a/keystone-moon/keystone/tests/unit/test_v2_controller.py b/keystone-moon/keystone/tests/unit/test_v2_controller.py
index 6c1edd0a..0d4b3cdc 100644
--- a/keystone-moon/keystone/tests/unit/test_v2_controller.py
+++ b/keystone-moon/keystone/tests/unit/test_v2_controller.py
@@ -16,6 +16,7 @@
import uuid
from keystone.assignment import controllers as assignment_controllers
+from keystone import exception
from keystone.resource import controllers as resource_controllers
from keystone.tests import unit as tests
from keystone.tests.unit import default_fixtures
@@ -92,4 +93,51 @@ class TenantTestCase(tests.TestCase):
for tenant in default_fixtures.TENANTS:
tenant_copy = tenant.copy()
tenant_copy.pop('domain_id')
+ tenant_copy.pop('parent_id')
+ tenant_copy.pop('is_domain')
self.assertIn(tenant_copy, refs['tenants'])
+
+ def _create_is_domain_project(self):
+ project = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+ 'domain_id': 'default', 'is_domain': True}
+ project_ref = self.resource_api.create_project(project['id'], project)
+ return self.tenant_controller.v3_to_v2_project(project_ref)
+
+ def test_update_is_domain_project_not_found(self):
+ """Test that update is_domain project is not allowed in v2."""
+ project = self._create_is_domain_project()
+
+ project['name'] = uuid.uuid4().hex
+ self.assertRaises(
+ exception.ProjectNotFound,
+ self.tenant_controller.update_project,
+ _ADMIN_CONTEXT,
+ project['id'],
+ project
+ )
+
+ def test_delete_is_domain_project_not_found(self):
+ """Test that delete is_domain project is not allowed in v2."""
+ project = self._create_is_domain_project()
+
+ self.assertRaises(
+ exception.ProjectNotFound,
+ self.tenant_controller.delete_project,
+ _ADMIN_CONTEXT,
+ project['id']
+ )
+
+ def test_list_is_domain_project_not_found(self):
+ """Test v2 get_all_projects having projects that act as a domain.
+
+ In v2 no project with the is_domain flag enabled should be
+ returned.
+ """
+ project1 = self._create_is_domain_project()
+ project2 = self._create_is_domain_project()
+
+ refs = self.tenant_controller.get_all_projects(_ADMIN_CONTEXT)
+ projects = refs.get('tenants')
+
+ self.assertNotIn(project1, projects)
+ self.assertNotIn(project2, projects)
diff --git a/keystone-moon/keystone/tests/unit/test_v2_keystoneclient.py b/keystone-moon/keystone/tests/unit/test_v2_keystoneclient.py
index 7abc5bc4..e0843605 100644
--- a/keystone-moon/keystone/tests/unit/test_v2_keystoneclient.py
+++ b/keystone-moon/keystone/tests/unit/test_v2_keystoneclient.py
@@ -15,12 +15,14 @@
import datetime
import uuid
+from keystoneclient.contrib.ec2 import utils as ec2_utils
from keystoneclient import exceptions as client_exceptions
from keystoneclient.v2_0 import client as ks_client
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
+from six.moves import range
import webob
from keystone.tests import unit as tests
@@ -35,6 +37,11 @@ DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
class ClientDrivenTestCase(tests.TestCase):
+ def config_files(self):
+ config_files = super(ClientDrivenTestCase, self).config_files()
+ config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
+ return config_files
+
def setUp(self):
super(ClientDrivenTestCase, self).setUp()
@@ -61,7 +68,10 @@ class ClientDrivenTestCase(tests.TestCase):
fixture = self.useFixture(appserver.AppServer(conf, appserver.ADMIN))
self.admin_server = fixture.server
- self.addCleanup(self.cleanup_instance('public_server', 'admin_server'))
+ self.default_client = self.get_client()
+
+ self.addCleanup(self.cleanup_instance('public_server', 'admin_server',
+ 'default_client'))
def _public_url(self):
public_port = self.public_server.socket_info['socket'][1]
@@ -707,6 +717,20 @@ class ClientDrivenTestCase(tests.TestCase):
client.roles.create,
name="")
+ def test_role_create_member_role(self):
+ # delete the member role so that we can recreate it
+ client = self.get_client(admin=True)
+ client.roles.delete(role=CONF.member_role_id)
+
+ # deleting the member role revokes our token, so re-authenticate
+ client = self.get_client(admin=True)
+
+ # specify only the role name on creation
+ role = client.roles.create(name=CONF.member_role_name)
+
+ # the ID should be set as defined in CONF
+ self.assertEqual(CONF.member_role_id, role.id)
+
def test_role_get_404(self):
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
@@ -1043,3 +1067,308 @@ class ClientDrivenTestCase(tests.TestCase):
self.assertRaises(client_exceptions.Unauthorized, client.tenants.list)
client.auth_token = new_token_id
client.tenants.list()
+
+ def test_endpoint_crud(self):
+ client = self.get_client(admin=True)
+
+ service = client.services.create(name=uuid.uuid4().hex,
+ service_type=uuid.uuid4().hex,
+ description=uuid.uuid4().hex)
+
+ endpoint_region = uuid.uuid4().hex
+ invalid_service_id = uuid.uuid4().hex
+ endpoint_publicurl = uuid.uuid4().hex
+ endpoint_internalurl = uuid.uuid4().hex
+ endpoint_adminurl = uuid.uuid4().hex
+
+ # a non-existent service ID should trigger a 400
+ self.assertRaises(client_exceptions.BadRequest,
+ client.endpoints.create,
+ region=endpoint_region,
+ service_id=invalid_service_id,
+ publicurl=endpoint_publicurl,
+ adminurl=endpoint_adminurl,
+ internalurl=endpoint_internalurl)
+
+ endpoint = client.endpoints.create(region=endpoint_region,
+ service_id=service.id,
+ publicurl=endpoint_publicurl,
+ adminurl=endpoint_adminurl,
+ internalurl=endpoint_internalurl)
+
+ self.assertEqual(endpoint_region, endpoint.region)
+ self.assertEqual(service.id, endpoint.service_id)
+ self.assertEqual(endpoint_publicurl, endpoint.publicurl)
+ self.assertEqual(endpoint_internalurl, endpoint.internalurl)
+ self.assertEqual(endpoint_adminurl, endpoint.adminurl)
+
+ client.endpoints.delete(id=endpoint.id)
+ self.assertRaises(client_exceptions.NotFound, client.endpoints.delete,
+ id=endpoint.id)
+
+ def _send_ec2_auth_request(self, credentials, client=None):
+ if not client:
+ client = self.default_client
+ url = '%s/ec2tokens' % self.default_client.auth_url
+ (resp, token) = client.request(
+ url=url, method='POST',
+ body={'credentials': credentials})
+ return resp, token
+
+ def _generate_default_user_ec2_credentials(self):
+ cred = self. default_client.ec2.create(
+ user_id=self.user_foo['id'],
+ tenant_id=self.tenant_bar['id'])
+ return self._generate_user_ec2_credentials(cred.access, cred.secret)
+
+ def _generate_user_ec2_credentials(self, access, secret):
+ signer = ec2_utils.Ec2Signer(secret)
+ credentials = {'params': {'SignatureVersion': '2'},
+ 'access': access,
+ 'verb': 'GET',
+ 'host': 'localhost',
+ 'path': '/service/cloud'}
+ signature = signer.generate(credentials)
+ return credentials, signature
+
+ def test_ec2_auth_success(self):
+ credentials, signature = self._generate_default_user_ec2_credentials()
+ credentials['signature'] = signature
+ resp, token = self._send_ec2_auth_request(credentials)
+ self.assertEqual(200, resp.status_code)
+ self.assertIn('access', token)
+
+ def test_ec2_auth_success_trust(self):
+ # Add "other" role user_foo and create trust delegating it to user_two
+ self.assignment_api.add_role_to_user_and_project(
+ self.user_foo['id'],
+ self.tenant_bar['id'],
+ self.role_other['id'])
+ trust_id = 'atrust123'
+ trust = {'trustor_user_id': self.user_foo['id'],
+ 'trustee_user_id': self.user_two['id'],
+ 'project_id': self.tenant_bar['id'],
+ 'impersonation': True}
+ roles = [self.role_other]
+ self.trust_api.create_trust(trust_id, trust, roles)
+
+ # Create a client for user_two, scoped to the trust
+ client = self.get_client(self.user_two)
+ ret = client.authenticate(trust_id=trust_id,
+ tenant_id=self.tenant_bar['id'])
+ self.assertTrue(ret)
+ self.assertTrue(client.auth_ref.trust_scoped)
+ self.assertEqual(trust_id, client.auth_ref.trust_id)
+
+ # Create an ec2 keypair using the trust client impersonating user_foo
+ cred = client.ec2.create(user_id=self.user_foo['id'],
+ tenant_id=self.tenant_bar['id'])
+ credentials, signature = self._generate_user_ec2_credentials(
+ cred.access, cred.secret)
+ credentials['signature'] = signature
+ resp, token = self._send_ec2_auth_request(credentials)
+ self.assertEqual(200, resp.status_code)
+ self.assertEqual(trust_id, token['access']['trust']['id'])
+ # TODO(shardy) we really want to check the roles and trustee
+ # but because of where the stubbing happens we don't seem to
+ # hit the necessary code in controllers.py _authenticate_token
+ # so although all is OK via a real request, it incorrect in
+ # this test..
+
+ def test_ec2_auth_failure(self):
+ credentials, signature = self._generate_default_user_ec2_credentials()
+ credentials['signature'] = uuid.uuid4().hex
+ self.assertRaises(client_exceptions.Unauthorized,
+ self._send_ec2_auth_request,
+ credentials)
+
+ def test_ec2_credential_crud(self):
+ creds = self.default_client.ec2.list(user_id=self.user_foo['id'])
+ self.assertEqual([], creds)
+
+ cred = self.default_client.ec2.create(user_id=self.user_foo['id'],
+ tenant_id=self.tenant_bar['id'])
+ creds = self.default_client.ec2.list(user_id=self.user_foo['id'])
+ self.assertEqual(creds, [cred])
+ got = self.default_client.ec2.get(user_id=self.user_foo['id'],
+ access=cred.access)
+ self.assertEqual(cred, got)
+
+ self.default_client.ec2.delete(user_id=self.user_foo['id'],
+ access=cred.access)
+ creds = self.default_client.ec2.list(user_id=self.user_foo['id'])
+ self.assertEqual([], creds)
+
+ def test_ec2_credential_crud_non_admin(self):
+ na_client = self.get_client(self.user_two)
+ creds = na_client.ec2.list(user_id=self.user_two['id'])
+ self.assertEqual([], creds)
+
+ cred = na_client.ec2.create(user_id=self.user_two['id'],
+ tenant_id=self.tenant_baz['id'])
+ creds = na_client.ec2.list(user_id=self.user_two['id'])
+ self.assertEqual(creds, [cred])
+ got = na_client.ec2.get(user_id=self.user_two['id'],
+ access=cred.access)
+ self.assertEqual(cred, got)
+
+ na_client.ec2.delete(user_id=self.user_two['id'],
+ access=cred.access)
+ creds = na_client.ec2.list(user_id=self.user_two['id'])
+ self.assertEqual([], creds)
+
+ def test_ec2_list_credentials(self):
+ cred_1 = self.default_client.ec2.create(
+ user_id=self.user_foo['id'],
+ tenant_id=self.tenant_bar['id'])
+ cred_2 = self.default_client.ec2.create(
+ user_id=self.user_foo['id'],
+ tenant_id=self.tenant_service['id'])
+ cred_3 = self.default_client.ec2.create(
+ user_id=self.user_foo['id'],
+ tenant_id=self.tenant_mtu['id'])
+ two = self.get_client(self.user_two)
+ cred_4 = two.ec2.create(user_id=self.user_two['id'],
+ tenant_id=self.tenant_bar['id'])
+ creds = self.default_client.ec2.list(user_id=self.user_foo['id'])
+ self.assertEqual(3, len(creds))
+ self.assertEqual(sorted([cred_1, cred_2, cred_3],
+ key=lambda x: x.access),
+ sorted(creds, key=lambda x: x.access))
+ self.assertNotIn(cred_4, creds)
+
+ def test_ec2_credentials_create_404(self):
+ self.assertRaises(client_exceptions.NotFound,
+ self.default_client.ec2.create,
+ user_id=uuid.uuid4().hex,
+ tenant_id=self.tenant_bar['id'])
+ self.assertRaises(client_exceptions.NotFound,
+ self.default_client.ec2.create,
+ user_id=self.user_foo['id'],
+ tenant_id=uuid.uuid4().hex)
+
+ def test_ec2_credentials_delete_404(self):
+ self.assertRaises(client_exceptions.NotFound,
+ self.default_client.ec2.delete,
+ user_id=uuid.uuid4().hex,
+ access=uuid.uuid4().hex)
+
+ def test_ec2_credentials_get_404(self):
+ self.assertRaises(client_exceptions.NotFound,
+ self.default_client.ec2.get,
+ user_id=uuid.uuid4().hex,
+ access=uuid.uuid4().hex)
+
+ def test_ec2_credentials_list_404(self):
+ self.assertRaises(client_exceptions.NotFound,
+ self.default_client.ec2.list,
+ user_id=uuid.uuid4().hex)
+
+ def test_ec2_credentials_list_user_forbidden(self):
+ two = self.get_client(self.user_two)
+ self.assertRaises(client_exceptions.Forbidden, two.ec2.list,
+ user_id=self.user_foo['id'])
+
+ def test_ec2_credentials_get_user_forbidden(self):
+ cred = self.default_client.ec2.create(user_id=self.user_foo['id'],
+ tenant_id=self.tenant_bar['id'])
+
+ two = self.get_client(self.user_two)
+ self.assertRaises(client_exceptions.Forbidden, two.ec2.get,
+ user_id=self.user_foo['id'], access=cred.access)
+
+ self.default_client.ec2.delete(user_id=self.user_foo['id'],
+ access=cred.access)
+
+ def test_ec2_credentials_delete_user_forbidden(self):
+ cred = self.default_client.ec2.create(user_id=self.user_foo['id'],
+ tenant_id=self.tenant_bar['id'])
+
+ two = self.get_client(self.user_two)
+ self.assertRaises(client_exceptions.Forbidden, two.ec2.delete,
+ user_id=self.user_foo['id'], access=cred.access)
+
+ self.default_client.ec2.delete(user_id=self.user_foo['id'],
+ access=cred.access)
+
+ def test_endpoint_create_nonexistent_service(self):
+ client = self.get_client(admin=True)
+ self.assertRaises(client_exceptions.BadRequest,
+ client.endpoints.create,
+ region=uuid.uuid4().hex,
+ service_id=uuid.uuid4().hex,
+ publicurl=uuid.uuid4().hex,
+ adminurl=uuid.uuid4().hex,
+ internalurl=uuid.uuid4().hex)
+
+ def test_policy_crud(self):
+ # FIXME(dolph): this test was written prior to the v3 implementation of
+ # the client and essentially refers to a non-existent
+ # policy manager in the v2 client. this test needs to be
+ # moved to a test suite running against the v3 api
+ self.skipTest('Written prior to v3 client; needs refactor')
+
+ client = self.get_client(admin=True)
+
+ policy_blob = uuid.uuid4().hex
+ policy_type = uuid.uuid4().hex
+ service = client.services.create(
+ name=uuid.uuid4().hex,
+ service_type=uuid.uuid4().hex,
+ description=uuid.uuid4().hex)
+ endpoint = client.endpoints.create(
+ service_id=service.id,
+ region=uuid.uuid4().hex,
+ adminurl=uuid.uuid4().hex,
+ internalurl=uuid.uuid4().hex,
+ publicurl=uuid.uuid4().hex)
+
+ # create
+ policy = client.policies.create(
+ blob=policy_blob,
+ type=policy_type,
+ endpoint=endpoint.id)
+ self.assertEqual(policy_blob, policy.policy)
+ self.assertEqual(policy_type, policy.type)
+ self.assertEqual(endpoint.id, policy.endpoint_id)
+
+ policy = client.policies.get(policy=policy.id)
+ self.assertEqual(policy_blob, policy.policy)
+ self.assertEqual(policy_type, policy.type)
+ self.assertEqual(endpoint.id, policy.endpoint_id)
+
+ endpoints = [x for x in client.endpoints.list() if x.id == endpoint.id]
+ endpoint = endpoints[0]
+ self.assertEqual(policy_blob, policy.policy)
+ self.assertEqual(policy_type, policy.type)
+ self.assertEqual(endpoint.id, policy.endpoint_id)
+
+ # update
+ policy_blob = uuid.uuid4().hex
+ policy_type = uuid.uuid4().hex
+ endpoint = client.endpoints.create(
+ service_id=service.id,
+ region=uuid.uuid4().hex,
+ adminurl=uuid.uuid4().hex,
+ internalurl=uuid.uuid4().hex,
+ publicurl=uuid.uuid4().hex)
+
+ policy = client.policies.update(
+ policy=policy.id,
+ blob=policy_blob,
+ type=policy_type,
+ endpoint=endpoint.id)
+
+ policy = client.policies.get(policy=policy.id)
+ self.assertEqual(policy_blob, policy.policy)
+ self.assertEqual(policy_type, policy.type)
+ self.assertEqual(endpoint.id, policy.endpoint_id)
+
+ # delete
+ client.policies.delete(policy=policy.id)
+ self.assertRaises(
+ client_exceptions.NotFound,
+ client.policies.get,
+ policy=policy.id)
+ policies = [x for x in client.policies.list() if x.id == policy.id]
+ self.assertEqual(0, len(policies))
diff --git a/keystone-moon/keystone/tests/unit/test_v3.py b/keystone-moon/keystone/tests/unit/test_v3.py
index f6d6ed93..9bbfa103 100644
--- a/keystone-moon/keystone/tests/unit/test_v3.py
+++ b/keystone-moon/keystone/tests/unit/test_v3.py
@@ -299,10 +299,11 @@ class RestfulTestCase(tests.SQLDriverOverrides, rest.RestfulTestCase,
ref = self.new_ref()
return ref
- def new_project_ref(self, domain_id, parent_id=None):
+ def new_project_ref(self, domain_id=None, parent_id=None, is_domain=False):
ref = self.new_ref()
ref['domain_id'] = domain_id
ref['parent_id'] = parent_id
+ ref['is_domain'] = is_domain
return ref
def new_user_ref(self, domain_id, project_id=None):
@@ -362,9 +363,9 @@ class RestfulTestCase(tests.SQLDriverOverrides, rest.RestfulTestCase,
if isinstance(expires, six.string_types):
ref['expires_at'] = expires
elif isinstance(expires, dict):
- ref['expires_at'] = timeutils.strtime(
- timeutils.utcnow() + datetime.timedelta(**expires),
- fmt=TIME_FORMAT)
+ ref['expires_at'] = (
+ timeutils.utcnow() + datetime.timedelta(**expires)
+ ).strftime(TIME_FORMAT)
elif expires is None:
pass
else:
@@ -396,6 +397,29 @@ class RestfulTestCase(tests.SQLDriverOverrides, rest.RestfulTestCase,
return project
+ def get_unscoped_token(self):
+ """Convenience method so that we can test authenticated requests."""
+ r = self.admin_request(
+ method='POST',
+ path='/v3/auth/tokens',
+ body={
+ 'auth': {
+ 'identity': {
+ 'methods': ['password'],
+ 'password': {
+ 'user': {
+ 'name': self.user['name'],
+ 'password': self.user['password'],
+ 'domain': {
+ 'id': self.user['domain_id']
+ }
+ }
+ }
+ }
+ }
+ })
+ return r.headers.get('X-Subject-Token')
+
def get_scoped_token(self):
"""Convenience method so that we can test authenticated requests."""
r = self.admin_request(
@@ -424,6 +448,34 @@ class RestfulTestCase(tests.SQLDriverOverrides, rest.RestfulTestCase,
})
return r.headers.get('X-Subject-Token')
+ def get_domain_scoped_token(self):
+ """Convenience method for requesting domain scoped token."""
+ r = self.admin_request(
+ method='POST',
+ path='/v3/auth/tokens',
+ body={
+ 'auth': {
+ 'identity': {
+ 'methods': ['password'],
+ 'password': {
+ 'user': {
+ 'name': self.user['name'],
+ 'password': self.user['password'],
+ 'domain': {
+ 'id': self.user['domain_id']
+ }
+ }
+ }
+ },
+ 'scope': {
+ 'domain': {
+ 'id': self.domain['id'],
+ }
+ }
+ }
+ })
+ return r.headers.get('X-Subject-Token')
+
def get_requested_token(self, auth):
"""Request the specific token we want."""
@@ -593,20 +645,6 @@ class RestfulTestCase(tests.SQLDriverOverrides, rest.RestfulTestCase,
return entity
- def assertDictContainsSubset(self, expected, actual):
- """"Asserts if dictionary actual is a superset of expected.
-
- Tests whether the key/value pairs in dictionary actual are a superset
- of those in expected.
-
- """
- for k, v in expected.iteritems():
- self.assertIn(k, actual)
- if isinstance(v, dict):
- self.assertDictContainsSubset(v, actual[k])
- else:
- self.assertEqual(v, actual[k])
-
# auth validation
def assertValidISO8601ExtendedFormatDatetime(self, dt):
@@ -752,7 +790,7 @@ class RestfulTestCase(tests.SQLDriverOverrides, rest.RestfulTestCase,
self.assertValidCatalog(resp.json['catalog'])
self.assertIn('links', resp.json)
self.assertIsInstance(resp.json['links'], dict)
- self.assertEqual(['self'], resp.json['links'].keys())
+ self.assertEqual(['self'], list(resp.json['links'].keys()))
self.assertEqual(
'http://localhost/v3/auth/catalog',
resp.json['links']['self'])
@@ -1258,6 +1296,42 @@ class AuthContextMiddlewareTestCase(RestfulTestCase):
self.assertDictEqual(req.environ.get(authorization.AUTH_CONTEXT_ENV),
{})
+ def test_unscoped_token_auth_context(self):
+ unscoped_token = self.get_unscoped_token()
+ req = self._mock_request_object(unscoped_token)
+ application = None
+ middleware.AuthContextMiddleware(application).process_request(req)
+ for key in ['project_id', 'domain_id', 'domain_name']:
+ self.assertNotIn(
+ key,
+ req.environ.get(authorization.AUTH_CONTEXT_ENV))
+
+ def test_project_scoped_token_auth_context(self):
+ project_scoped_token = self.get_scoped_token()
+ req = self._mock_request_object(project_scoped_token)
+ application = None
+ middleware.AuthContextMiddleware(application).process_request(req)
+ self.assertEqual(
+ self.project['id'],
+ req.environ.get(authorization.AUTH_CONTEXT_ENV)['project_id'])
+
+ def test_domain_scoped_token_auth_context(self):
+ # grant the domain role to user
+ path = '/domains/%s/users/%s/roles/%s' % (
+ self.domain['id'], self.user['id'], self.role['id'])
+ self.put(path=path)
+
+ domain_scoped_token = self.get_domain_scoped_token()
+ req = self._mock_request_object(domain_scoped_token)
+ application = None
+ middleware.AuthContextMiddleware(application).process_request(req)
+ self.assertEqual(
+ self.domain['id'],
+ req.environ.get(authorization.AUTH_CONTEXT_ENV)['domain_id'])
+ self.assertEqual(
+ self.domain['name'],
+ req.environ.get(authorization.AUTH_CONTEXT_ENV)['domain_name'])
+
class JsonHomeTestMixin(object):
"""JSON Home test
@@ -1281,3 +1355,88 @@ class JsonHomeTestMixin(object):
for rel in self.JSON_HOME_DATA:
self.assertThat(resp_data['resources'][rel],
matchers.Equals(self.JSON_HOME_DATA[rel]))
+
+
class AssignmentTestMixin(object):
    """To hold assignment helper functions."""

    def build_role_assignment_query_url(self, effective=False, **filters):
        """Build and return a role assignment query url with provided params.

        Available filters are: domain_id, project_id, user_id, group_id,
        role_id and inherited_to_projects.

        :param effective: whether to request effective role assignments.
        :raises ValueError: if an unrecognized filter key is provided.
        """

        query_params = '?effective' if effective else ''

        for k, v in filters.items():
            # '?' opens the query string; subsequent params are '&'-joined.
            query_params += '?' if not query_params else '&'

            if k == 'inherited_to_projects':
                query_params += 'scope.OS-INHERIT:inherited_to=projects'
            else:
                if k in ['domain_id', 'project_id']:
                    # Scope filters are namespaced under 'scope.' in the API.
                    query_params += 'scope.'
                elif k not in ['user_id', 'group_id', 'role_id']:
                    raise ValueError(
                        'Invalid key \'%s\' in provided filters.' % k)

                query_params += '%s=%s' % (k.replace('_', '.'), v)

        return '/role_assignments%s' % query_params

    def build_role_assignment_link(self, **attribs):
        """Build and return a role assignment link with provided attributes.

        Provided attributes are expected to contain: domain_id or project_id,
        user_id or group_id, role_id and, optionally, inherited_to_projects.
        """

        if attribs.get('domain_id'):
            link = '/domains/' + attribs['domain_id']
        else:
            link = '/projects/' + attribs['project_id']

        if attribs.get('user_id'):
            link += '/users/' + attribs['user_id']
        else:
            link += '/groups/' + attribs['group_id']

        link += '/roles/' + attribs['role_id']

        if attribs.get('inherited_to_projects'):
            return '/OS-INHERIT%s/inherited_to_projects' % link

        return link

    def build_role_assignment_entity(self, link=None, **attribs):
        """Build and return a role assignment entity with provided attributes.

        Provided attributes are expected to contain: domain_id or project_id,
        user_id or group_id, role_id and, optionally, inherited_to_projects.
        When both user_id and group_id are given, the assignment is an
        effective one derived from group membership, and a 'membership'
        link is recorded.
        """

        entity = {'links': {'assignment': (
            link or self.build_role_assignment_link(**attribs))}}

        if attribs.get('domain_id'):
            entity['scope'] = {'domain': {'id': attribs['domain_id']}}
        else:
            entity['scope'] = {'project': {'id': attribs['project_id']}}

        if attribs.get('user_id'):
            entity['user'] = {'id': attribs['user_id']}

            # BUGFIX: this membership check must be nested inside the
            # user_id branch. When it sat at the outer level, a group-only
            # assignment raised KeyError on 'user_id' and the 'group'
            # entry below was never set.
            if attribs.get('group_id'):
                entity['links']['membership'] = ('/groups/%s/users/%s' %
                                                 (attribs['group_id'],
                                                  attribs['user_id']))
        else:
            entity['group'] = {'id': attribs['group_id']}

        entity['role'] = {'id': attribs['role_id']}

        if attribs.get('inherited_to_projects'):
            entity['scope']['OS-INHERIT:inherited_to'] = 'projects'

        return entity
diff --git a/keystone-moon/keystone/tests/unit/test_v3_assignment.py b/keystone-moon/keystone/tests/unit/test_v3_assignment.py
index add14bfb..03e5d30b 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_assignment.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_assignment.py
@@ -11,107 +11,23 @@
# under the License.
import random
-import six
import uuid
from oslo_config import cfg
+from six.moves import range
from keystone.common import controller
from keystone import exception
from keystone.tests import unit as tests
from keystone.tests.unit import test_v3
+from keystone.tests.unit import utils
CONF = cfg.CONF
-def _build_role_assignment_query_url(effective=False, **filters):
- '''Build and return a role assignment query url with provided params.
-
- Available filters are: domain_id, project_id, user_id, group_id, role_id
- and inherited_to_projects.
-
- '''
-
- query_params = '?effective' if effective else ''
-
- for k, v in six.iteritems(filters):
- query_params += '?' if not query_params else '&'
-
- if k == 'inherited_to_projects':
- query_params += 'scope.OS-INHERIT:inherited_to=projects'
- else:
- if k in ['domain_id', 'project_id']:
- query_params += 'scope.'
- elif k not in ['user_id', 'group_id', 'role_id']:
- raise ValueError('Invalid key \'%s\' in provided filters.' % k)
-
- query_params += '%s=%s' % (k.replace('_', '.'), v)
-
- return '/role_assignments%s' % query_params
-
-
-def _build_role_assignment_link(**attribs):
- """Build and return a role assignment link with provided attributes.
-
- Provided attributes are expected to contain: domain_id or project_id,
- user_id or group_id, role_id and, optionally, inherited_to_projects.
-
- """
-
- if attribs.get('domain_id'):
- link = '/domains/' + attribs['domain_id']
- else:
- link = '/projects/' + attribs['project_id']
-
- if attribs.get('user_id'):
- link += '/users/' + attribs['user_id']
- else:
- link += '/groups/' + attribs['group_id']
-
- link += '/roles/' + attribs['role_id']
-
- if attribs.get('inherited_to_projects'):
- return '/OS-INHERIT%s/inherited_to_projects' % link
-
- return link
-
-
-def _build_role_assignment_entity(link=None, **attribs):
- """Build and return a role assignment entity with provided attributes.
-
- Provided attributes are expected to contain: domain_id or project_id,
- user_id or group_id, role_id and, optionally, inherited_to_projects.
-
- """
-
- entity = {'links': {'assignment': (
- link or _build_role_assignment_link(**attribs))}}
-
- if attribs.get('domain_id'):
- entity['scope'] = {'domain': {'id': attribs['domain_id']}}
- else:
- entity['scope'] = {'project': {'id': attribs['project_id']}}
-
- if attribs.get('user_id'):
- entity['user'] = {'id': attribs['user_id']}
-
- if attribs.get('group_id'):
- entity['links']['membership'] = ('/groups/%s/users/%s' %
- (attribs['group_id'],
- attribs['user_id']))
- else:
- entity['group'] = {'id': attribs['group_id']}
-
- entity['role'] = {'id': attribs['role_id']}
-
- if attribs.get('inherited_to_projects'):
- entity['scope']['OS-INHERIT:inherited_to'] = 'projects'
-
- return entity
-
-
-class AssignmentTestCase(test_v3.RestfulTestCase):
+class AssignmentTestCase(test_v3.RestfulTestCase,
+ test_v3.AssignmentTestMixin):
"""Test domains, projects, roles and role assignments."""
def setUp(self):
@@ -205,8 +121,8 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
self.assignment_api.add_user_to_project(self.project2['id'],
self.user2['id'])
- # First check a user in that domain can authenticate, via
- # Both v2 and v3
+ # First check a user in that domain can authenticate. The v2 user
+ # cannot authenticate because they exist outside the default domain.
body = {
'auth': {
'passwordCredentials': {
@@ -216,7 +132,8 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
'tenantId': self.project2['id']
}
}
- self.admin_request(path='/v2.0/tokens', method='POST', body=body)
+ self.admin_request(
+ path='/v2.0/tokens', method='POST', body=body, expected_status=401)
auth_data = self.build_authentication_request(
user_id=self.user2['id'],
@@ -507,26 +424,26 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
for domain in create_domains():
self.assertRaises(
- AssertionError, self.assignment_api.create_domain,
+ AssertionError, self.resource_api.create_domain,
domain['id'], domain)
self.assertRaises(
- AssertionError, self.assignment_api.update_domain,
+ AssertionError, self.resource_api.update_domain,
domain['id'], domain)
self.assertRaises(
- exception.DomainNotFound, self.assignment_api.delete_domain,
+ exception.DomainNotFound, self.resource_api.delete_domain,
domain['id'])
# swap 'name' with 'id' and try again, expecting the request to
# gracefully fail
domain['id'], domain['name'] = domain['name'], domain['id']
self.assertRaises(
- AssertionError, self.assignment_api.create_domain,
+ AssertionError, self.resource_api.create_domain,
domain['id'], domain)
self.assertRaises(
- AssertionError, self.assignment_api.update_domain,
+ AssertionError, self.resource_api.update_domain,
domain['id'], domain)
self.assertRaises(
- exception.DomainNotFound, self.assignment_api.delete_domain,
+ exception.DomainNotFound, self.resource_api.delete_domain,
domain['id'])
def test_forbid_operations_on_defined_federated_domain(self):
@@ -542,47 +459,13 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
domain = self.new_domain_ref()
domain['name'] = non_default_name
self.assertRaises(AssertionError,
- self.assignment_api.create_domain,
+ self.resource_api.create_domain,
domain['id'], domain)
self.assertRaises(exception.DomainNotFound,
- self.assignment_api.delete_domain,
+ self.resource_api.delete_domain,
domain['id'])
self.assertRaises(AssertionError,
- self.assignment_api.update_domain,
- domain['id'], domain)
-
- def test_set_federated_domain_when_config_empty(self):
- """Make sure we are operable even if config value is not properly
- set.
-
- This includes operations like create, update, delete.
-
- """
- federated_name = 'Federated'
- self.config_fixture.config(group='federation',
- federated_domain_name='')
- domain = self.new_domain_ref()
- domain['id'] = federated_name
- self.assertRaises(AssertionError,
- self.assignment_api.create_domain,
- domain['id'], domain)
- self.assertRaises(exception.DomainNotFound,
- self.assignment_api.delete_domain,
- domain['id'])
- self.assertRaises(AssertionError,
- self.assignment_api.update_domain,
- domain['id'], domain)
-
- # swap id with name
- domain['id'], domain['name'] = domain['name'], domain['id']
- self.assertRaises(AssertionError,
- self.assignment_api.create_domain,
- domain['id'], domain)
- self.assertRaises(exception.DomainNotFound,
- self.assignment_api.delete_domain,
- domain['id'])
- self.assertRaises(AssertionError,
- self.assignment_api.update_domain,
+ self.resource_api.update_domain,
domain['id'], domain)
# Project CRUD tests
@@ -606,8 +489,71 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
"""Call ``POST /projects``."""
self.post('/projects', body={'project': {}}, expected_status=400)
def test_create_project_invalid_domain_id(self):
    """Call ``POST /projects``."""
    # A random hex string cannot match any existing domain id.
    unknown_domain_id = uuid.uuid4().hex
    project_ref = self.new_project_ref(domain_id=unknown_domain_id)
    self.post('/projects', body={'project': project_ref},
              expected_status=400)
+
def test_create_project_is_domain_not_allowed(self):
    """Call ``POST /projects``.

    Setting is_domain=True is not supported yet and should raise
    NotImplemented.

    """
    project_ref = self.new_project_ref(domain_id=self.domain_id,
                                       is_domain=True)
    self.post('/projects', body={'project': project_ref},
              expected_status=501)
+
@utils.wip('waiting for projects acting as domains implementation')
def test_create_project_without_parent_id_and_without_domain_id(self):
    """Call ``POST /projects``.

    With neither domain_id nor parent_id supplied, the new project's
    domain_id should be normalized to the domain of the (domain-scoped)
    token used for the request.
    """

    # Grant the user a role on the domain so it can obtain a domain
    # scoped token.
    role_collection_url = (
        '/domains/%(domain_id)s/users/%(user_id)s/roles' % {
            'domain_id': self.domain_id,
            'user_id': self.user['id']})
    self.put('%(collection_url)s/%(role_id)s' % {
        'collection_url': role_collection_url,
        'role_id': self.role_id})

    # Build a domain scoped authentication request.
    domain_auth = self.build_authentication_request(
        user_id=self.user['id'],
        password=self.user['password'],
        domain_id=self.domain_id)

    # The created project is expected to land in the token's domain.
    project_ref = self.new_project_ref()
    resp = self.post('/projects', auth=domain_auth,
                     body={'project': project_ref})
    project_ref['domain_id'] = self.domain['id']
    self.assertValidProjectResponse(resp, project_ref)
+
@utils.wip('waiting for projects acting as domains implementation')
def test_create_project_with_parent_id_and_no_domain_id(self):
    """Call ``POST /projects``.

    With only a parent_id supplied, the new project's domain_id should
    be normalized to the parent project's domain_id.
    """
    child_ref = self.new_project_ref(parent_id=self.project['id'])

    resp = self.post('/projects', body={'project': child_ref})
    self.assertEqual(resp.result['project']['domain_id'],
                     self.project['domain_id'])
    child_ref['domain_id'] = self.domain['id']
    self.assertValidProjectResponse(resp, child_ref)
+
def _create_projects_hierarchy(self, hierarchy_size=1):
- """Creates a project hierarchy with specified size.
+ """Creates a single-branched project hierarchy with the specified size.
:param hierarchy_size: the desired hierarchy size, default is 1 -
a project with one child.
@@ -615,9 +561,8 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
:returns projects: a list of the projects in the created hierarchy.
"""
- resp = self.get(
- '/projects/%(project_id)s' % {
- 'project_id': self.project_id})
+ new_ref = self.new_project_ref(domain_id=self.domain_id)
+ resp = self.post('/projects', body={'project': new_ref})
projects = [resp.result]
@@ -633,6 +578,58 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
return projects
def test_list_projects_filtering_by_parent_id(self):
    """Call ``GET /projects?parent_id={project_id}``."""
    projects = self._create_projects_hierarchy(hierarchy_size=2)

    # Add a second child under projects[1]; it becomes projects[3].
    sibling_ref = self.new_project_ref(
        domain_id=self.domain_id,
        parent_id=projects[1]['project']['id'])
    resp = self.post('/projects', body={'project': sibling_ref})
    self.assertValidProjectResponse(resp, sibling_ref)
    projects.append(resp.result)

    # Map each queried parent to the immediate children the API must
    # return: projects[0] has projects[1]; projects[1] has projects[2]
    # and projects[3]; projects[2] is a leaf with no children.
    cases = [
        (projects[0], [projects[1]['project']]),
        (projects[1], [projects[2]['project'], projects[3]['project']]),
        (projects[2], []),
    ]
    for parent, expected_children in cases:
        r = self.get('/projects?parent_id=%(project_id)s' % {
            'project_id': parent['project']['id']})
        self.assertValidProjectListResponse(r)
        self.assertEqual(expected_children, r.result['projects'])
+
def test_create_hierarchical_project(self):
"""Call ``POST /projects``."""
self._create_projects_hierarchy()
@@ -644,6 +641,22 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
'project_id': self.project_id})
self.assertValidProjectResponse(r, self.project)
def test_get_project_with_parents_as_list_with_invalid_id(self):
    """Call ``GET /projects/{project_id}?parents_as_list``."""
    # Both a literal None and an unknown id must yield 404.
    for bad_project_id in (None, uuid.uuid4().hex):
        self.get('/projects/%(project_id)s?parents_as_list' % {
            'project_id': bad_project_id}, expected_status=404)
+
def test_get_project_with_subtree_as_list_with_invalid_id(self):
    """Call ``GET /projects/{project_id}?subtree_as_list``."""
    # Both a literal None and an unknown id must yield 404.
    for bad_project_id in (None, uuid.uuid4().hex):
        self.get('/projects/%(project_id)s?subtree_as_list' % {
            'project_id': bad_project_id}, expected_status=404)
+
def test_get_project_with_parents_as_ids(self):
"""Call ``GET /projects/{project_id}?parents_as_ids``."""
projects = self._create_projects_hierarchy(hierarchy_size=2)
@@ -683,18 +696,66 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
# projects[0] has no parents, parents_as_ids must be None
self.assertIsNone(parents_as_ids)
- def test_get_project_with_parents_as_list(self):
- """Call ``GET /projects/{project_id}?parents_as_list``."""
- projects = self._create_projects_hierarchy(hierarchy_size=2)
+ def test_get_project_with_parents_as_list_with_full_access(self):
+ """``GET /projects/{project_id}?parents_as_list`` with full access.
- r = self.get(
- '/projects/%(project_id)s?parents_as_list' % {
- 'project_id': projects[1]['project']['id']})
+ Test plan:
+ - Create 'parent', 'project' and 'subproject' projects;
+ - Assign a user a role on each one of those projects;
+ - Check that calling parents_as_list on 'subproject' returns both
+ 'project' and 'parent'.
+
+ """
+
+ # Create the project hierarchy
+ parent, project, subproject = self._create_projects_hierarchy(2)
+
+ # Assign a role for the user on all the created projects
+ for proj in (parent, project, subproject):
+ self.put(self.build_role_assignment_link(
+ role_id=self.role_id, user_id=self.user_id,
+ project_id=proj['project']['id']))
+
+ # Make the API call
+ r = self.get('/projects/%(project_id)s?parents_as_list' %
+ {'project_id': subproject['project']['id']})
+ self.assertValidProjectResponse(r, subproject['project'])
+
+ # Assert only 'project' and 'parent' are in the parents list
+ self.assertIn(project, r.result['project']['parents'])
+ self.assertIn(parent, r.result['project']['parents'])
+ self.assertEqual(2, len(r.result['project']['parents']))
+
+ def test_get_project_with_parents_as_list_with_partial_access(self):
+ """``GET /projects/{project_id}?parents_as_list`` with partial access.
+
+ Test plan:
+
+ - Create 'parent', 'project' and 'subproject' projects;
+ - Assign a user a role on 'parent' and 'subproject';
+ - Check that calling parents_as_list on 'subproject' only returns
+ 'parent'.
+
+ """
+
+ # Create the project hierarchy
+ parent, project, subproject = self._create_projects_hierarchy(2)
+
+ # Assign a role for the user on parent and subproject
+ for proj in (parent, subproject):
+ self.put(self.build_role_assignment_link(
+ role_id=self.role_id, user_id=self.user_id,
+ project_id=proj['project']['id']))
+
+ # Make the API call
+ r = self.get('/projects/%(project_id)s?parents_as_list' %
+ {'project_id': subproject['project']['id']})
+ self.assertValidProjectResponse(r, subproject['project'])
+
+ # Assert only 'parent' is in the parents list
+ self.assertIn(parent, r.result['project']['parents'])
self.assertEqual(1, len(r.result['project']['parents']))
- self.assertValidProjectResponse(r, projects[1]['project'])
- self.assertIn(projects[0], r.result['project']['parents'])
- self.assertNotIn(projects[2], r.result['project']['parents'])
def test_get_project_with_parents_as_list_and_parents_as_ids(self):
"""Call ``GET /projects/{project_id}?parents_as_list&parents_as_ids``.
@@ -798,18 +859,65 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
# projects[3] has no subtree, subtree_as_ids must be None
self.assertIsNone(subtree_as_ids)
- def test_get_project_with_subtree_as_list(self):
- """Call ``GET /projects/{project_id}?subtree_as_list``."""
- projects = self._create_projects_hierarchy(hierarchy_size=2)
+ def test_get_project_with_subtree_as_list_with_full_access(self):
+ """``GET /projects/{project_id}?subtree_as_list`` with full access.
- r = self.get(
- '/projects/%(project_id)s?subtree_as_list' % {
- 'project_id': projects[1]['project']['id']})
+ Test plan:
+
+ - Create 'parent', 'project' and 'subproject' projects;
+ - Assign a user a role on each one of those projects;
+ - Check that calling subtree_as_list on 'parent' returns both 'parent'
+ and 'subproject'.
+
+ """
+
+ # Create the project hierarchy
+ parent, project, subproject = self._create_projects_hierarchy(2)
+
+ # Assign a role for the user on all the created projects
+ for proj in (parent, project, subproject):
+ self.put(self.build_role_assignment_link(
+ role_id=self.role_id, user_id=self.user_id,
+ project_id=proj['project']['id']))
+ # Make the API call
+ r = self.get('/projects/%(project_id)s?subtree_as_list' %
+ {'project_id': parent['project']['id']})
+ self.assertValidProjectResponse(r, parent['project'])
+
+ # Assert only 'project' and 'subproject' are in the subtree
+ self.assertIn(project, r.result['project']['subtree'])
+ self.assertIn(subproject, r.result['project']['subtree'])
+ self.assertEqual(2, len(r.result['project']['subtree']))
+
+ def test_get_project_with_subtree_as_list_with_partial_access(self):
+ """``GET /projects/{project_id}?subtree_as_list`` with partial access.
+
+ Test plan:
+
+ - Create 'parent', 'project' and 'subproject' projects;
+ - Assign a user a role on 'parent' and 'subproject';
+ - Check that calling subtree_as_list on 'parent' returns 'subproject'.
+
+ """
+
+ # Create the project hierarchy
+ parent, project, subproject = self._create_projects_hierarchy(2)
+
+ # Assign a role for the user on parent and subproject
+ for proj in (parent, subproject):
+ self.put(self.build_role_assignment_link(
+ role_id=self.role_id, user_id=self.user_id,
+ project_id=proj['project']['id']))
+
+ # Make the API call
+ r = self.get('/projects/%(project_id)s?subtree_as_list' %
+ {'project_id': parent['project']['id']})
+ self.assertValidProjectResponse(r, parent['project'])
+
+ # Assert only 'subproject' is in the subtree
+ self.assertIn(subproject, r.result['project']['subtree'])
self.assertEqual(1, len(r.result['project']['subtree']))
- self.assertValidProjectResponse(r, projects[1]['project'])
- self.assertNotIn(projects[0], r.result['project']['subtree'])
- self.assertIn(projects[2], r.result['project']['subtree'])
def test_get_project_with_subtree_as_list_and_subtree_as_ids(self):
"""Call ``GET /projects/{project_id}?subtree_as_list&subtree_as_ids``.
@@ -859,6 +967,22 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
body={'project': leaf_project},
expected_status=403)
def test_update_project_is_domain_not_allowed(self):
    """Call ``PATCH /projects/{project_id}`` with is_domain.

    The is_domain flag is immutable.
    """
    project_ref = self.new_project_ref(domain_id=self.domain['id'])
    create_resp = self.post('/projects', body={'project': project_ref})
    # Freshly created projects default to is_domain=False.
    self.assertFalse(create_resp.result['project']['is_domain'])

    # Attempting to flip the flag must be rejected.
    project_ref['is_domain'] = True
    self.patch(
        '/projects/%(project_id)s' % {
            'project_id': create_resp.result['project']['id']},
        body={'project': project_ref},
        expected_status=400)
+
def test_disable_leaf_project(self):
"""Call ``PATCH /projects/{project_id}``."""
projects = self._create_projects_hierarchy()
@@ -920,10 +1044,10 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
def test_delete_not_leaf_project(self):
"""Call ``DELETE /projects/{project_id}``."""
- self._create_projects_hierarchy()
+ projects = self._create_projects_hierarchy()
self.delete(
'/projects/%(project_id)s' % {
- 'project_id': self.project_id},
+ 'project_id': projects[0]['project']['id']},
expected_status=403)
# Role CRUD tests
@@ -967,6 +1091,19 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
self.delete('/roles/%(role_id)s' % {
'role_id': self.role_id})
def test_create_member_role(self):
    """Call ``POST /roles``."""
    # Specify only the configured member role name on creation...
    member_ref = self.new_role_ref()
    member_ref['name'] = CONF.member_role_name
    resp = self.post('/roles', body={'role': member_ref})
    self.assertValidRoleResponse(resp, member_ref)

    # ...but the server must assign the ID configured in CONF.
    self.assertEqual(CONF.member_role_id, resp.json['role']['id'])
+
# Role Grants tests
def test_crud_user_project_role_grants(self):
@@ -1252,9 +1389,9 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
# Now add one of each of the four types of assignment, making sure
# that we get them all back.
- gd_entity = _build_role_assignment_entity(domain_id=self.domain_id,
- group_id=self.group_id,
- role_id=self.role_id)
+ gd_entity = self.build_role_assignment_entity(domain_id=self.domain_id,
+ group_id=self.group_id,
+ role_id=self.role_id)
self.put(gd_entity['links']['assignment'])
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(
@@ -1263,9 +1400,9 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
resource_url=collection_url)
self.assertRoleAssignmentInListResponse(r, gd_entity)
- ud_entity = _build_role_assignment_entity(domain_id=self.domain_id,
- user_id=self.user1['id'],
- role_id=self.role_id)
+ ud_entity = self.build_role_assignment_entity(domain_id=self.domain_id,
+ user_id=self.user1['id'],
+ role_id=self.role_id)
self.put(ud_entity['links']['assignment'])
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(
@@ -1274,9 +1411,9 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
resource_url=collection_url)
self.assertRoleAssignmentInListResponse(r, ud_entity)
- gp_entity = _build_role_assignment_entity(project_id=self.project_id,
- group_id=self.group_id,
- role_id=self.role_id)
+ gp_entity = self.build_role_assignment_entity(
+ project_id=self.project_id, group_id=self.group_id,
+ role_id=self.role_id)
self.put(gp_entity['links']['assignment'])
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(
@@ -1285,9 +1422,9 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
resource_url=collection_url)
self.assertRoleAssignmentInListResponse(r, gp_entity)
- up_entity = _build_role_assignment_entity(project_id=self.project_id,
- user_id=self.user1['id'],
- role_id=self.role_id)
+ up_entity = self.build_role_assignment_entity(
+ project_id=self.project_id, user_id=self.user1['id'],
+ role_id=self.role_id)
self.put(up_entity['links']['assignment'])
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(
@@ -1346,9 +1483,9 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
resource_url=collection_url)
existing_assignments = len(r.result.get('role_assignments'))
- gd_entity = _build_role_assignment_entity(domain_id=self.domain_id,
- group_id=self.group_id,
- role_id=self.role_id)
+ gd_entity = self.build_role_assignment_entity(domain_id=self.domain_id,
+ group_id=self.group_id,
+ role_id=self.role_id)
self.put(gd_entity['links']['assignment'])
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(
@@ -1366,11 +1503,11 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
r,
expected_length=existing_assignments + 2,
resource_url=collection_url)
- ud_entity = _build_role_assignment_entity(
+ ud_entity = self.build_role_assignment_entity(
link=gd_entity['links']['assignment'], domain_id=self.domain_id,
user_id=self.user1['id'], role_id=self.role_id)
self.assertRoleAssignmentInListResponse(r, ud_entity)
- ud_entity = _build_role_assignment_entity(
+ ud_entity = self.build_role_assignment_entity(
link=gd_entity['links']['assignment'], domain_id=self.domain_id,
user_id=self.user2['id'], role_id=self.role_id)
self.assertRoleAssignmentInListResponse(r, ud_entity)
@@ -1420,9 +1557,9 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
resource_url=collection_url)
existing_assignments = len(r.result.get('role_assignments'))
- gd_entity = _build_role_assignment_entity(domain_id=self.domain_id,
- group_id=self.group_id,
- role_id=self.role_id)
+ gd_entity = self.build_role_assignment_entity(domain_id=self.domain_id,
+ group_id=self.group_id,
+ role_id=self.role_id)
self.put(gd_entity['links']['assignment'])
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(
@@ -1516,22 +1653,22 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
# Now add one of each of the four types of assignment
- gd_entity = _build_role_assignment_entity(domain_id=self.domain_id,
- group_id=self.group1['id'],
- role_id=self.role1['id'])
+ gd_entity = self.build_role_assignment_entity(
+ domain_id=self.domain_id, group_id=self.group1['id'],
+ role_id=self.role1['id'])
self.put(gd_entity['links']['assignment'])
- ud_entity = _build_role_assignment_entity(domain_id=self.domain_id,
- user_id=self.user1['id'],
- role_id=self.role2['id'])
+ ud_entity = self.build_role_assignment_entity(domain_id=self.domain_id,
+ user_id=self.user1['id'],
+ role_id=self.role2['id'])
self.put(ud_entity['links']['assignment'])
- gp_entity = _build_role_assignment_entity(
+ gp_entity = self.build_role_assignment_entity(
project_id=self.project1['id'], group_id=self.group1['id'],
role_id=self.role1['id'])
self.put(gp_entity['links']['assignment'])
- up_entity = _build_role_assignment_entity(
+ up_entity = self.build_role_assignment_entity(
project_id=self.project1['id'], user_id=self.user1['id'],
role_id=self.role2['id'])
self.put(up_entity['links']['assignment'])
@@ -1607,17 +1744,17 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
self.assertRoleAssignmentInListResponse(r, up_entity)
self.assertRoleAssignmentInListResponse(r, ud_entity)
# ...and the two via group membership...
- gp1_link = _build_role_assignment_link(project_id=self.project1['id'],
- group_id=self.group1['id'],
- role_id=self.role1['id'])
- gd1_link = _build_role_assignment_link(domain_id=self.domain_id,
- group_id=self.group1['id'],
- role_id=self.role1['id'])
-
- up1_entity = _build_role_assignment_entity(
+ gp1_link = self.build_role_assignment_link(
+ project_id=self.project1['id'], group_id=self.group1['id'],
+ role_id=self.role1['id'])
+ gd1_link = self.build_role_assignment_link(domain_id=self.domain_id,
+ group_id=self.group1['id'],
+ role_id=self.role1['id'])
+
+ up1_entity = self.build_role_assignment_entity(
link=gp1_link, project_id=self.project1['id'],
user_id=self.user1['id'], role_id=self.role1['id'])
- ud1_entity = _build_role_assignment_entity(
+ ud1_entity = self.build_role_assignment_entity(
link=gd1_link, domain_id=self.domain_id, user_id=self.user1['id'],
role_id=self.role1['id'])
self.assertRoleAssignmentInListResponse(r, up1_entity)
@@ -1641,7 +1778,8 @@ class AssignmentTestCase(test_v3.RestfulTestCase):
self.assertRoleAssignmentInListResponse(r, up1_entity)
-class RoleAssignmentBaseTestCase(test_v3.RestfulTestCase):
+class RoleAssignmentBaseTestCase(test_v3.RestfulTestCase,
+ test_v3.AssignmentTestMixin):
"""Base class for testing /v3/role_assignments API behavior."""
MAX_HIERARCHY_BREADTH = 3
@@ -1665,8 +1803,8 @@ class RoleAssignmentBaseTestCase(test_v3.RestfulTestCase):
for i in range(breadth):
subprojects.append(self.new_project_ref(
domain_id=self.domain_id, parent_id=parent_id))
- self.assignment_api.create_project(subprojects[-1]['id'],
- subprojects[-1])
+ self.resource_api.create_project(subprojects[-1]['id'],
+ subprojects[-1])
new_parent = subprojects[random.randint(0, breadth - 1)]
create_project_hierarchy(new_parent['id'], depth - 1)
@@ -1676,12 +1814,12 @@ class RoleAssignmentBaseTestCase(test_v3.RestfulTestCase):
# Create a domain
self.domain = self.new_domain_ref()
self.domain_id = self.domain['id']
- self.assignment_api.create_domain(self.domain_id, self.domain)
+ self.resource_api.create_domain(self.domain_id, self.domain)
# Create a project hierarchy
self.project = self.new_project_ref(domain_id=self.domain_id)
self.project_id = self.project['id']
- self.assignment_api.create_project(self.project_id, self.project)
+ self.resource_api.create_project(self.project_id, self.project)
# Create a random project hierarchy
create_project_hierarchy(self.project_id,
@@ -1714,7 +1852,7 @@ class RoleAssignmentBaseTestCase(test_v3.RestfulTestCase):
# Create a role
self.role = self.new_role_ref()
self.role_id = self.role['id']
- self.assignment_api.create_role(self.role_id, self.role)
+ self.role_api.create_role(self.role_id, self.role)
# Set default user and group to be used on tests
self.default_user_id = self.user_ids[0]
@@ -1748,7 +1886,7 @@ class RoleAssignmentBaseTestCase(test_v3.RestfulTestCase):
:returns: role assignments query URL.
"""
- return _build_role_assignment_query_url(**filters)
+ return self.build_role_assignment_query_url(**filters)
class RoleAssignmentFailureTestCase(RoleAssignmentBaseTestCase):
@@ -1869,7 +2007,7 @@ class RoleAssignmentDirectTestCase(RoleAssignmentBaseTestCase):
:returns: the list of the expected role assignments.
"""
- return [_build_role_assignment_entity(**filters)]
+ return [self.build_role_assignment_entity(**filters)]
# Test cases below call the generic test method, providing different filter
# combinations. Filters are provided as specified in the method name, after
@@ -1980,8 +2118,8 @@ class RoleAssignmentEffectiveTestCase(RoleAssignmentInheritedTestCase):
query_filters.pop('domain_id', None)
query_filters.pop('project_id', None)
- return _build_role_assignment_query_url(effective=True,
- **query_filters)
+ return self.build_role_assignment_query_url(effective=True,
+ **query_filters)
def _list_expected_role_assignments(self, **filters):
"""Given the filters, it returns expected direct role assignments.
@@ -1995,7 +2133,7 @@ class RoleAssignmentEffectiveTestCase(RoleAssignmentInheritedTestCase):
"""
# Get assignment link, to be put on 'links': {'assignment': link}
- assignment_link = _build_role_assignment_link(**filters)
+ assignment_link = self.build_role_assignment_link(**filters)
# Expand group membership
user_ids = [None]
@@ -2010,11 +2148,11 @@ class RoleAssignmentEffectiveTestCase(RoleAssignmentInheritedTestCase):
project_ids = [None]
if filters.get('domain_id'):
project_ids = [project['id'] for project in
- self.assignment_api.list_projects_in_domain(
+ self.resource_api.list_projects_in_domain(
filters.pop('domain_id'))]
else:
project_ids = [project['id'] for project in
- self.assignment_api.list_projects_in_subtree(
+ self.resource_api.list_projects_in_subtree(
self.project_id)]
# Compute expected role assignments
@@ -2023,13 +2161,14 @@ class RoleAssignmentEffectiveTestCase(RoleAssignmentInheritedTestCase):
filters['project_id'] = project_id
for user_id in user_ids:
filters['user_id'] = user_id
- assignments.append(_build_role_assignment_entity(
+ assignments.append(self.build_role_assignment_entity(
link=assignment_link, **filters))
return assignments
-class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
+class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
+ test_v3.AssignmentTestMixin):
"""Test inheritance crud and its effects."""
def config_overrides(self):
@@ -2058,7 +2197,7 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
self.v3_authenticate_token(project_auth_data, expected_status=401)
# Grant non-inherited role for user on domain
- non_inher_ud_link = _build_role_assignment_link(
+ non_inher_ud_link = self.build_role_assignment_link(
domain_id=self.domain_id, user_id=user['id'], role_id=self.role_id)
self.put(non_inher_ud_link)
@@ -2071,7 +2210,7 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
self.role_api.create_role(inherited_role['id'], inherited_role)
# Grant inherited role for user on domain
- inher_ud_link = _build_role_assignment_link(
+ inher_ud_link = self.build_role_assignment_link(
domain_id=self.domain_id, user_id=user['id'],
role_id=inherited_role['id'], inherited_to_projects=True)
self.put(inher_ud_link)
@@ -2120,7 +2259,7 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
self.v3_authenticate_token(project_auth_data, expected_status=401)
# Grant non-inherited role for user on domain
- non_inher_gd_link = _build_role_assignment_link(
+ non_inher_gd_link = self.build_role_assignment_link(
domain_id=self.domain_id, user_id=user['id'], role_id=self.role_id)
self.put(non_inher_gd_link)
@@ -2133,7 +2272,7 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
self.role_api.create_role(inherited_role['id'], inherited_role)
# Grant inherited role for user on domain
- inher_gd_link = _build_role_assignment_link(
+ inher_gd_link = self.build_role_assignment_link(
domain_id=self.domain_id, user_id=user['id'],
role_id=inherited_role['id'], inherited_to_projects=True)
self.put(inher_gd_link)
@@ -2155,6 +2294,48 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
# Check the user cannot get a domain token anymore
self.v3_authenticate_token(domain_auth_data, expected_status=401)
+ def _test_crud_inherited_and_direct_assignment_on_target(self, target_url):
+ # Create a new role to avoid assignments loaded from sample data
+ role = self.new_role_ref()
+ self.role_api.create_role(role['id'], role)
+
+ # Define URLs
+ direct_url = '%s/users/%s/roles/%s' % (
+ target_url, self.user_id, role['id'])
+ inherited_url = '/OS-INHERIT/%s/inherited_to_projects' % direct_url
+
+ # Create the direct assignment
+ self.put(direct_url)
+ # Check the direct assignment exists, but the inherited one does not
+ self.head(direct_url)
+ self.head(inherited_url, expected_status=404)
+
+ # Now add the inherited assignment
+ self.put(inherited_url)
+ # Check both the direct and inherited assignment exist
+ self.head(direct_url)
+ self.head(inherited_url)
+
+    # Delete the inherited assignment
+ self.delete(inherited_url)
+ # Check the direct assignment exists, but the inherited one does not
+ self.head(direct_url)
+ self.head(inherited_url, expected_status=404)
+
+    # Now delete the direct assignment
+ self.delete(direct_url)
+ # Check that none of them exist
+ self.head(direct_url, expected_status=404)
+ self.head(inherited_url, expected_status=404)
+
+ def test_crud_inherited_and_direct_assignment_on_domains(self):
+ self._test_crud_inherited_and_direct_assignment_on_target(
+ '/domains/%s' % self.domain_id)
+
+ def test_crud_inherited_and_direct_assignment_on_projects(self):
+ self._test_crud_inherited_and_direct_assignment_on_target(
+ '/projects/%s' % self.project_id)
+
def test_crud_user_inherited_domain_role_grants(self):
role_list = []
for _ in range(2):
@@ -2260,7 +2441,7 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
self.assertValidRoleAssignmentListResponse(r,
expected_length=1,
resource_url=collection_url)
- ud_entity = _build_role_assignment_entity(
+ ud_entity = self.build_role_assignment_entity(
domain_id=domain['id'], user_id=user1['id'],
role_id=role_list[3]['id'], inherited_to_projects=True)
self.assertRoleAssignmentInListResponse(r, ud_entity)
@@ -2279,14 +2460,13 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
resource_url=collection_url)
# An effective role for an inherited role will be a project
# entity, with a domain link to the inherited assignment
- ud_url = _build_role_assignment_link(
+ ud_url = self.build_role_assignment_link(
domain_id=domain['id'], user_id=user1['id'],
role_id=role_list[3]['id'], inherited_to_projects=True)
- up_entity = _build_role_assignment_entity(link=ud_url,
- project_id=project1['id'],
- user_id=user1['id'],
- role_id=role_list[3]['id'],
- inherited_to_projects=True)
+ up_entity = self.build_role_assignment_entity(
+ link=ud_url, project_id=project1['id'],
+ user_id=user1['id'], role_id=role_list[3]['id'],
+ inherited_to_projects=True)
self.assertRoleAssignmentInListResponse(r, up_entity)
def test_list_role_assignments_for_disabled_inheritance_extension(self):
@@ -2360,14 +2540,13 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
expected_length=3,
resource_url=collection_url)
- ud_url = _build_role_assignment_link(
+ ud_url = self.build_role_assignment_link(
domain_id=domain['id'], user_id=user1['id'],
role_id=role_list[3]['id'], inherited_to_projects=True)
- up_entity = _build_role_assignment_entity(link=ud_url,
- project_id=project1['id'],
- user_id=user1['id'],
- role_id=role_list[3]['id'],
- inherited_to_projects=True)
+ up_entity = self.build_role_assignment_entity(
+ link=ud_url, project_id=project1['id'],
+ user_id=user1['id'], role_id=role_list[3]['id'],
+ inherited_to_projects=True)
self.assertRoleAssignmentInListResponse(r, up_entity)
@@ -2463,7 +2642,7 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
self.assertValidRoleAssignmentListResponse(r,
expected_length=1,
resource_url=collection_url)
- gd_entity = _build_role_assignment_entity(
+ gd_entity = self.build_role_assignment_entity(
domain_id=domain['id'], group_id=group1['id'],
role_id=role_list[3]['id'], inherited_to_projects=True)
self.assertRoleAssignmentInListResponse(r, gd_entity)
@@ -2482,7 +2661,7 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
resource_url=collection_url)
# An effective role for an inherited role will be a project
# entity, with a domain link to the inherited assignment
- up_entity = _build_role_assignment_entity(
+ up_entity = self.build_role_assignment_entity(
link=gd_entity['links']['assignment'], project_id=project1['id'],
user_id=user1['id'], role_id=role_list[3]['id'],
inherited_to_projects=True)
@@ -2573,10 +2752,10 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
self.assertValidRoleAssignmentListResponse(r,
expected_length=2,
resource_url=collection_url)
- ud_entity = _build_role_assignment_entity(
+ ud_entity = self.build_role_assignment_entity(
domain_id=domain['id'], user_id=user1['id'],
role_id=role_list[3]['id'], inherited_to_projects=True)
- gd_entity = _build_role_assignment_entity(
+ gd_entity = self.build_role_assignment_entity(
domain_id=domain['id'], group_id=group1['id'],
role_id=role_list[4]['id'], inherited_to_projects=True)
self.assertRoleAssignmentInListResponse(r, ud_entity)
@@ -2626,7 +2805,7 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
self.v3_authenticate_token(leaf_project_auth_data, expected_status=401)
# Grant non-inherited role for user on leaf project
- non_inher_up_link = _build_role_assignment_link(
+ non_inher_up_link = self.build_role_assignment_link(
project_id=leaf_id, user_id=self.user['id'],
role_id=non_inherited_role_id)
self.put(non_inher_up_link)
@@ -2636,7 +2815,7 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
self.v3_authenticate_token(leaf_project_auth_data)
# Grant inherited role for user on root project
- inher_up_link = _build_role_assignment_link(
+ inher_up_link = self.build_role_assignment_link(
project_id=root_id, user_id=self.user['id'],
role_id=inherited_role_id, inherited_to_projects=True)
self.put(inher_up_link)
@@ -2683,7 +2862,7 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
self.v3_authenticate_token(leaf_project_auth_data, expected_status=401)
# Grant non-inherited role for group on leaf project
- non_inher_gp_link = _build_role_assignment_link(
+ non_inher_gp_link = self.build_role_assignment_link(
project_id=leaf_id, group_id=group['id'],
role_id=non_inherited_role_id)
self.put(non_inher_gp_link)
@@ -2693,7 +2872,7 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
self.v3_authenticate_token(leaf_project_auth_data)
# Grant inherited role for group on root project
- inher_gp_link = _build_role_assignment_link(
+ inher_gp_link = self.build_role_assignment_link(
project_id=root_id, group_id=group['id'],
role_id=inherited_role_id, inherited_to_projects=True)
self.put(inher_gp_link)
@@ -2732,13 +2911,13 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
self._setup_hierarchical_projects_scenario())
# Grant non-inherited role
- non_inher_up_entity = _build_role_assignment_entity(
+ non_inher_up_entity = self.build_role_assignment_entity(
project_id=root_id, user_id=self.user['id'],
role_id=non_inherited_role_id)
self.put(non_inher_up_entity['links']['assignment'])
# Grant inherited role
- inher_up_entity = _build_role_assignment_entity(
+ inher_up_entity = self.build_role_assignment_entity(
project_id=root_id, user_id=self.user['id'],
role_id=inherited_role_id, inherited_to_projects=True)
self.put(inher_up_entity['links']['assignment'])
@@ -2756,7 +2935,7 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
self.assertRoleAssignmentInListResponse(r, inher_up_entity)
# Assert that the user does not have non-inherited role on leaf project
- non_inher_up_entity = _build_role_assignment_entity(
+ non_inher_up_entity = self.build_role_assignment_entity(
project_id=leaf_id, user_id=self.user['id'],
role_id=non_inherited_role_id)
self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity)
@@ -2784,13 +2963,13 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
self._setup_hierarchical_projects_scenario())
# Grant non-inherited role
- non_inher_up_entity = _build_role_assignment_entity(
+ non_inher_up_entity = self.build_role_assignment_entity(
project_id=root_id, user_id=self.user['id'],
role_id=non_inherited_role_id)
self.put(non_inher_up_entity['links']['assignment'])
# Grant inherited role
- inher_up_entity = _build_role_assignment_entity(
+ inher_up_entity = self.build_role_assignment_entity(
project_id=root_id, user_id=self.user['id'],
role_id=inherited_role_id, inherited_to_projects=True)
self.put(inher_up_entity['links']['assignment'])
@@ -2808,7 +2987,7 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
self.assertRoleAssignmentNotInListResponse(r, inher_up_entity)
# Assert that the user does not have non-inherited role on leaf project
- non_inher_up_entity = _build_role_assignment_entity(
+ non_inher_up_entity = self.build_role_assignment_entity(
project_id=leaf_id, user_id=self.user['id'],
role_id=non_inherited_role_id)
self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity)
@@ -2835,13 +3014,13 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
self._setup_hierarchical_projects_scenario())
# Grant non-inherited role
- non_inher_up_entity = _build_role_assignment_entity(
+ non_inher_up_entity = self.build_role_assignment_entity(
project_id=root_id, user_id=self.user['id'],
role_id=non_inherited_role_id)
self.put(non_inher_up_entity['links']['assignment'])
# Grant inherited role
- inher_up_entity = _build_role_assignment_entity(
+ inher_up_entity = self.build_role_assignment_entity(
project_id=root_id, user_id=self.user['id'],
role_id=inherited_role_id, inherited_to_projects=True)
self.put(inher_up_entity['links']['assignment'])
@@ -2860,7 +3039,7 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
self.assertRoleAssignmentInListResponse(r, inher_up_entity)
# Assert that the user does not have non-inherited role on leaf project
- non_inher_up_entity = _build_role_assignment_entity(
+ non_inher_up_entity = self.build_role_assignment_entity(
project_id=leaf_id, user_id=self.user['id'],
role_id=non_inherited_role_id)
self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity)
@@ -2898,11 +3077,32 @@ class AssignmentInheritanceDisabledTestCase(test_v3.RestfulTestCase):
class AssignmentV3toV2MethodsTestCase(tests.TestCase):
"""Test domain V3 to V2 conversion methods."""
+ def _setup_initial_projects(self):
+ self.project_id = uuid.uuid4().hex
+ self.domain_id = CONF.identity.default_domain_id
+ self.parent_id = uuid.uuid4().hex
+ # Project with only domain_id in ref
+ self.project1 = {'id': self.project_id,
+ 'name': self.project_id,
+ 'domain_id': self.domain_id}
+ # Project with both domain_id and parent_id in ref
+ self.project2 = {'id': self.project_id,
+ 'name': self.project_id,
+ 'domain_id': self.domain_id,
+ 'parent_id': self.parent_id}
+        # Project with domain_id and parent_id in ref (NOTE: same as project2)
+ self.project3 = {'id': self.project_id,
+ 'name': self.project_id,
+ 'domain_id': self.domain_id,
+ 'parent_id': self.parent_id}
+ # Expected result with no domain_id and parent_id
+ self.expected_project = {'id': self.project_id,
+ 'name': self.project_id}
def test_v2controller_filter_domain_id(self):
# V2.0 is not domain aware, ensure domain_id is popped off the ref.
other_data = uuid.uuid4().hex
- domain_id = uuid.uuid4().hex
+ domain_id = CONF.identity.default_domain_id
ref = {'domain_id': domain_id,
'other_data': other_data}
@@ -2941,3 +3141,52 @@ class AssignmentV3toV2MethodsTestCase(tests.TestCase):
self.assertRaises(exception.Unauthorized,
controller.V2Controller.filter_domain,
non_default_domain_ref)
+
+ def test_v2controller_filter_project_parent_id(self):
+ # V2.0 is not project hierarchy aware, ensure parent_id is popped off.
+ other_data = uuid.uuid4().hex
+ parent_id = uuid.uuid4().hex
+ ref = {'parent_id': parent_id,
+ 'other_data': other_data}
+
+ ref_no_parent = {'other_data': other_data}
+ expected_ref = ref_no_parent.copy()
+
+ updated_ref = controller.V2Controller.filter_project_parent_id(ref)
+ self.assertIs(ref, updated_ref)
+ self.assertDictEqual(ref, expected_ref)
+ # Make sure we don't error/muck up data if parent_id isn't present
+ updated_ref = controller.V2Controller.filter_project_parent_id(
+ ref_no_parent)
+ self.assertIs(ref_no_parent, updated_ref)
+ self.assertDictEqual(ref_no_parent, expected_ref)
+
+ def test_v3_to_v2_project_method(self):
+ self._setup_initial_projects()
+ updated_project1 = controller.V2Controller.v3_to_v2_project(
+ self.project1)
+ self.assertIs(self.project1, updated_project1)
+ self.assertDictEqual(self.project1, self.expected_project)
+ updated_project2 = controller.V2Controller.v3_to_v2_project(
+ self.project2)
+ self.assertIs(self.project2, updated_project2)
+ self.assertDictEqual(self.project2, self.expected_project)
+ updated_project3 = controller.V2Controller.v3_to_v2_project(
+ self.project3)
+ self.assertIs(self.project3, updated_project3)
+ self.assertDictEqual(self.project3, self.expected_project)
+
+ def test_v3_to_v2_project_method_list(self):
+ self._setup_initial_projects()
+ project_list = [self.project1, self.project2, self.project3]
+ updated_list = controller.V2Controller.v3_to_v2_project(project_list)
+
+ self.assertEqual(len(updated_list), len(project_list))
+
+ for i, ref in enumerate(updated_list):
+ # Order should not change.
+ self.assertIs(ref, project_list[i])
+
+ self.assertDictEqual(self.project1, self.expected_project)
+ self.assertDictEqual(self.project2, self.expected_project)
+ self.assertDictEqual(self.project3, self.expected_project)
diff --git a/keystone-moon/keystone/tests/unit/test_v3_auth.py b/keystone-moon/keystone/tests/unit/test_v3_auth.py
index ec079170..96f0ff1f 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_auth.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_auth.py
@@ -22,18 +22,18 @@ from keystoneclient.common import cms
import mock
from oslo_config import cfg
from oslo_utils import timeutils
-import six
+from six.moves import range
from testtools import matchers
from testtools import testcase
from keystone import auth
+from keystone.common import utils
from keystone import exception
from keystone.policy.backends import rules
from keystone.tests import unit as tests
from keystone.tests.unit import ksfixtures
from keystone.tests.unit import test_v3
-
CONF = cfg.CONF
@@ -97,8 +97,8 @@ class TestAuthInfo(test_v3.AuthTestMixin, testcase.TestCase):
'password', 'password']
context = None
auth_info = auth.controllers.AuthInfo.create(context, auth_data)
- self.assertEqual(auth_info.get_method_names(),
- ['password', 'token'])
+ self.assertEqual(['password', 'token'],
+ auth_info.get_method_names())
def test_get_method_data_invalid_method(self):
auth_data = self.build_authentication_request(
@@ -114,276 +114,294 @@ class TestAuthInfo(test_v3.AuthTestMixin, testcase.TestCase):
class TokenAPITests(object):
- # Why is this not just setUP? Because TokenAPITests is not a test class
+ # Why is this not just setUp? Because TokenAPITests is not a test class
# itself. If TokenAPITests became a subclass of the testcase, it would get
# called by the enumerate-tests-in-file code. The way the functions get
# resolved in Python for multiple inheritance means that a setUp in this
# would get skipped by the testrunner.
def doSetUp(self):
- auth_data = self.build_authentication_request(
+ r = self.v3_authenticate_token(self.build_authentication_request(
username=self.user['name'],
user_domain_id=self.domain_id,
- password=self.user['password'])
- resp = self.v3_authenticate_token(auth_data)
- self.token_data = resp.result
- self.token = resp.headers.get('X-Subject-Token')
- self.headers = {'X-Subject-Token': resp.headers.get('X-Subject-Token')}
+ password=self.user['password']))
+ self.v3_token_data = r.result
+ self.v3_token = r.headers.get('X-Subject-Token')
+ self.headers = {'X-Subject-Token': r.headers.get('X-Subject-Token')}
def test_default_fixture_scope_token(self):
self.assertIsNotNone(self.get_scoped_token())
- def verify_token(self, *args, **kwargs):
- return cms.verify_token(*args, **kwargs)
-
- def test_v3_token_id(self):
- auth_data = self.build_authentication_request(
- user_id=self.user['id'],
- password=self.user['password'])
- resp = self.v3_authenticate_token(auth_data)
- token_data = resp.result
- token_id = resp.headers.get('X-Subject-Token')
- self.assertIn('expires_at', token_data['token'])
-
- decoded_token = self.verify_token(token_id, CONF.signing.certfile,
- CONF.signing.ca_certs)
- decoded_token_dict = json.loads(decoded_token)
-
- token_resp_dict = json.loads(resp.body)
-
- self.assertEqual(decoded_token_dict, token_resp_dict)
- # should be able to validate hash PKI token as well
- hash_token_id = cms.cms_hash_token(token_id)
- headers = {'X-Subject-Token': hash_token_id}
- resp = self.get('/auth/tokens', headers=headers)
- expected_token_data = resp.result
- self.assertDictEqual(expected_token_data, token_data)
-
def test_v3_v2_intermix_non_default_domain_failed(self):
- auth_data = self.build_authentication_request(
+ v3_token = self.get_requested_token(self.build_authentication_request(
user_id=self.user['id'],
- password=self.user['password'])
- token = self.get_requested_token(auth_data)
+ password=self.user['password']))
# now validate the v3 token with v2 API
- path = '/v2.0/tokens/%s' % (token)
- self.admin_request(path=path,
- token='ADMIN',
- method='GET',
- expected_status=401)
+ self.admin_request(
+ path='/v2.0/tokens/%s' % v3_token,
+ token=CONF.admin_token,
+ method='GET',
+ expected_status=401)
def test_v3_v2_intermix_new_default_domain(self):
# If the default_domain_id config option is changed, then should be
# able to validate a v3 token with user in the new domain.
# 1) Create a new domain for the user.
- new_domain_id = uuid.uuid4().hex
new_domain = {
'description': uuid.uuid4().hex,
'enabled': True,
- 'id': new_domain_id,
+ 'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
}
-
- self.resource_api.create_domain(new_domain_id, new_domain)
+ self.resource_api.create_domain(new_domain['id'], new_domain)
# 2) Create user in new domain.
new_user_password = uuid.uuid4().hex
new_user = {
'name': uuid.uuid4().hex,
- 'domain_id': new_domain_id,
+ 'domain_id': new_domain['id'],
'password': new_user_password,
'email': uuid.uuid4().hex,
}
-
new_user = self.identity_api.create_user(new_user)
# 3) Update the default_domain_id config option to the new domain
+ self.config_fixture.config(
+ group='identity',
+ default_domain_id=new_domain['id'])
- self.config_fixture.config(group='identity',
- default_domain_id=new_domain_id)
-
- # 4) Get a token using v3 api.
-
- auth_data = self.build_authentication_request(
+ # 4) Get a token using v3 API.
+ v3_token = self.get_requested_token(self.build_authentication_request(
user_id=new_user['id'],
- password=new_user_password)
- token = self.get_requested_token(auth_data)
+ password=new_user_password))
- # 5) Authenticate token using v2 api.
-
- path = '/v2.0/tokens/%s' % (token)
- self.admin_request(path=path,
- token='ADMIN',
- method='GET')
+ # 5) Validate token using v2 API.
+ self.admin_request(
+ path='/v2.0/tokens/%s' % v3_token,
+ token=CONF.admin_token,
+ method='GET')
def test_v3_v2_intermix_domain_scoped_token_failed(self):
# grant the domain role to user
- path = '/domains/%s/users/%s/roles/%s' % (
- self.domain['id'], self.user['id'], self.role['id'])
- self.put(path=path)
- auth_data = self.build_authentication_request(
+ self.put(
+ path='/domains/%s/users/%s/roles/%s' % (
+ self.domain['id'], self.user['id'], self.role['id']))
+
+ # generate a domain-scoped v3 token
+ v3_token = self.get_requested_token(self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
- domain_id=self.domain['id'])
- token = self.get_requested_token(auth_data)
+ domain_id=self.domain['id']))
- # now validate the v3 token with v2 API
- path = '/v2.0/tokens/%s' % (token)
- self.admin_request(path=path,
- token='ADMIN',
- method='GET',
- expected_status=401)
+ # domain-scoped tokens are not supported by v2
+ self.admin_request(
+ method='GET',
+ path='/v2.0/tokens/%s' % v3_token,
+ token=CONF.admin_token,
+ expected_status=401)
def test_v3_v2_intermix_non_default_project_failed(self):
- auth_data = self.build_authentication_request(
+ # self.project is in a non-default domain
+ v3_token = self.get_requested_token(self.build_authentication_request(
user_id=self.default_domain_user['id'],
password=self.default_domain_user['password'],
- project_id=self.project['id'])
- token = self.get_requested_token(auth_data)
+ project_id=self.project['id']))
- # now validate the v3 token with v2 API
- path = '/v2.0/tokens/%s' % (token)
- self.admin_request(path=path,
- token='ADMIN',
- method='GET',
- expected_status=401)
+ # v2 cannot reference projects outside the default domain
+ self.admin_request(
+ method='GET',
+ path='/v2.0/tokens/%s' % v3_token,
+ token=CONF.admin_token,
+ expected_status=401)
+
+ def test_v3_v2_intermix_non_default_user_failed(self):
+ self.assignment_api.create_grant(
+ self.role['id'],
+ user_id=self.user['id'],
+ project_id=self.default_domain_project['id'])
+
+ # self.user is in a non-default domain
+ v3_token = self.get_requested_token(self.build_authentication_request(
+ user_id=self.user['id'],
+ password=self.user['password'],
+ project_id=self.default_domain_project['id']))
+
+        # v2 cannot reference users outside the default domain
+ self.admin_request(
+ method='GET',
+ path='/v2.0/tokens/%s' % v3_token,
+ token=CONF.admin_token,
+ expected_status=401)
+
+ def test_v3_v2_intermix_domain_scope_failed(self):
+ self.assignment_api.create_grant(
+ self.role['id'],
+ user_id=self.default_domain_user['id'],
+ domain_id=self.domain['id'])
+
+ v3_token = self.get_requested_token(self.build_authentication_request(
+ user_id=self.default_domain_user['id'],
+ password=self.default_domain_user['password'],
+ domain_id=self.domain['id']))
+
+        # domain-scoped tokens are not supported by v2
+ self.admin_request(
+ path='/v2.0/tokens/%s' % v3_token,
+ token=CONF.admin_token,
+ method='GET',
+ expected_status=401)
def test_v3_v2_unscoped_token_intermix(self):
- auth_data = self.build_authentication_request(
+ r = self.v3_authenticate_token(self.build_authentication_request(
user_id=self.default_domain_user['id'],
- password=self.default_domain_user['password'])
- resp = self.v3_authenticate_token(auth_data)
- token_data = resp.result
- token = resp.headers.get('X-Subject-Token')
+ password=self.default_domain_user['password']))
+ self.assertValidUnscopedTokenResponse(r)
+ v3_token_data = r.result
+ v3_token = r.headers.get('X-Subject-Token')
# now validate the v3 token with v2 API
- path = '/v2.0/tokens/%s' % (token)
- resp = self.admin_request(path=path,
- token='ADMIN',
- method='GET')
- v2_token = resp.result
- self.assertEqual(v2_token['access']['user']['id'],
- token_data['token']['user']['id'])
+ r = self.admin_request(
+ path='/v2.0/tokens/%s' % v3_token,
+ token=CONF.admin_token,
+ method='GET')
+ v2_token_data = r.result
+
+ self.assertEqual(v2_token_data['access']['user']['id'],
+ v3_token_data['token']['user']['id'])
# v2 token time has not fraction of second precision so
# just need to make sure the non fraction part agrees
- self.assertIn(v2_token['access']['token']['expires'][:-1],
- token_data['token']['expires_at'])
+ self.assertIn(v2_token_data['access']['token']['expires'][:-1],
+ v3_token_data['token']['expires_at'])
def test_v3_v2_token_intermix(self):
# FIXME(gyee): PKI tokens are not interchangeable because token
# data is baked into the token itself.
- auth_data = self.build_authentication_request(
+ r = self.v3_authenticate_token(self.build_authentication_request(
user_id=self.default_domain_user['id'],
password=self.default_domain_user['password'],
- project_id=self.default_domain_project['id'])
- resp = self.v3_authenticate_token(auth_data)
- token_data = resp.result
- token = resp.headers.get('X-Subject-Token')
+ project_id=self.default_domain_project['id']))
+ self.assertValidProjectScopedTokenResponse(r)
+ v3_token_data = r.result
+ v3_token = r.headers.get('X-Subject-Token')
# now validate the v3 token with v2 API
- path = '/v2.0/tokens/%s' % (token)
- resp = self.admin_request(path=path,
- token='ADMIN',
- method='GET')
- v2_token = resp.result
- self.assertEqual(v2_token['access']['user']['id'],
- token_data['token']['user']['id'])
- # v2 token time has not fraction of second precision so
- # just need to make sure the non fraction part agrees
- self.assertIn(v2_token['access']['token']['expires'][:-1],
- token_data['token']['expires_at'])
- self.assertEqual(v2_token['access']['user']['roles'][0]['id'],
- token_data['token']['roles'][0]['id'])
+ r = self.admin_request(
+ method='GET',
+ path='/v2.0/tokens/%s' % v3_token,
+ token=CONF.admin_token)
+ v2_token_data = r.result
- def test_v3_v2_hashed_pki_token_intermix(self):
- auth_data = self.build_authentication_request(
- user_id=self.default_domain_user['id'],
- password=self.default_domain_user['password'],
- project_id=self.default_domain_project['id'])
- resp = self.v3_authenticate_token(auth_data)
- token_data = resp.result
- token = resp.headers.get('X-Subject-Token')
-
- # should be able to validate a hash PKI token in v2 too
- token = cms.cms_hash_token(token)
- path = '/v2.0/tokens/%s' % (token)
- resp = self.admin_request(path=path,
- token='ADMIN',
- method='GET')
- v2_token = resp.result
- self.assertEqual(v2_token['access']['user']['id'],
- token_data['token']['user']['id'])
+ self.assertEqual(v2_token_data['access']['user']['id'],
+ v3_token_data['token']['user']['id'])
# v2 token time has not fraction of second precision so
# just need to make sure the non fraction part agrees
- self.assertIn(v2_token['access']['token']['expires'][:-1],
- token_data['token']['expires_at'])
- self.assertEqual(v2_token['access']['user']['roles'][0]['id'],
- token_data['token']['roles'][0]['id'])
+ self.assertIn(v2_token_data['access']['token']['expires'][:-1],
+ v3_token_data['token']['expires_at'])
+ self.assertEqual(v2_token_data['access']['user']['roles'][0]['name'],
+ v3_token_data['token']['roles'][0]['name'])
def test_v2_v3_unscoped_token_intermix(self):
- body = {
- 'auth': {
- 'passwordCredentials': {
- 'userId': self.user['id'],
- 'password': self.user['password']
+ r = self.admin_request(
+ method='POST',
+ path='/v2.0/tokens',
+ body={
+ 'auth': {
+ 'passwordCredentials': {
+ 'userId': self.default_domain_user['id'],
+ 'password': self.default_domain_user['password']
+ }
}
- }}
- resp = self.admin_request(path='/v2.0/tokens',
- method='POST',
- body=body)
- v2_token_data = resp.result
+ })
+ v2_token_data = r.result
v2_token = v2_token_data['access']['token']['id']
- headers = {'X-Subject-Token': v2_token}
- resp = self.get('/auth/tokens', headers=headers)
- token_data = resp.result
+
+ r = self.get('/auth/tokens', headers={'X-Subject-Token': v2_token})
+ # FIXME(dolph): Due to bug 1476329, v2 tokens validated on v3 are
+ # missing timezones, so they will not pass this assertion.
+ # self.assertValidUnscopedTokenResponse(r)
+ v3_token_data = r.result
+
self.assertEqual(v2_token_data['access']['user']['id'],
- token_data['token']['user']['id'])
+ v3_token_data['token']['user']['id'])
# v2 token time has not fraction of second precision so
# just need to make sure the non fraction part agrees
self.assertIn(v2_token_data['access']['token']['expires'][-1],
- token_data['token']['expires_at'])
+ v3_token_data['token']['expires_at'])
def test_v2_v3_token_intermix(self):
- body = {
- 'auth': {
- 'passwordCredentials': {
- 'userId': self.user['id'],
- 'password': self.user['password']
- },
- 'tenantId': self.project['id']
- }}
- resp = self.admin_request(path='/v2.0/tokens',
- method='POST',
- body=body)
- v2_token_data = resp.result
+ r = self.admin_request(
+ path='/v2.0/tokens',
+ method='POST',
+ body={
+ 'auth': {
+ 'passwordCredentials': {
+ 'userId': self.default_domain_user['id'],
+ 'password': self.default_domain_user['password']
+ },
+ 'tenantId': self.default_domain_project['id']
+ }
+ })
+ v2_token_data = r.result
v2_token = v2_token_data['access']['token']['id']
- headers = {'X-Subject-Token': v2_token}
- resp = self.get('/auth/tokens', headers=headers)
- token_data = resp.result
+
+ r = self.get('/auth/tokens', headers={'X-Subject-Token': v2_token})
+ # FIXME(dolph): Due to bug 1476329, v2 tokens validated on v3 are
+ # missing timezones, so they will not pass this assertion.
+ # self.assertValidProjectScopedTokenResponse(r)
+ v3_token_data = r.result
+
self.assertEqual(v2_token_data['access']['user']['id'],
- token_data['token']['user']['id'])
+ v3_token_data['token']['user']['id'])
# v2 token time has not fraction of second precision so
# just need to make sure the non fraction part agrees
self.assertIn(v2_token_data['access']['token']['expires'][-1],
- token_data['token']['expires_at'])
+ v3_token_data['token']['expires_at'])
self.assertEqual(v2_token_data['access']['user']['roles'][0]['name'],
- token_data['token']['roles'][0]['name'])
+ v3_token_data['token']['roles'][0]['name'])
v2_issued_at = timeutils.parse_isotime(
v2_token_data['access']['token']['issued_at'])
v3_issued_at = timeutils.parse_isotime(
- token_data['token']['issued_at'])
+ v3_token_data['token']['issued_at'])
self.assertEqual(v2_issued_at, v3_issued_at)
+ def test_v2_token_deleted_on_v3(self):
+ # Create a v2 token.
+ body = {
+ 'auth': {
+ 'passwordCredentials': {
+ 'userId': self.default_domain_user['id'],
+ 'password': self.default_domain_user['password']
+ },
+ 'tenantId': self.default_domain_project['id']
+ }
+ }
+ r = self.admin_request(
+ path='/v2.0/tokens', method='POST', body=body)
+ v2_token = r.result['access']['token']['id']
+
+ # Delete the v2 token using v3.
+ resp = self.delete(
+ '/auth/tokens', headers={'X-Subject-Token': v2_token})
+ self.assertEqual(resp.status_code, 204)
+
+ # Attempting to use the deleted token on v2 should fail.
+ self.admin_request(
+ path='/v2.0/tenants', method='GET', token=v2_token,
+ expected_status=401)
+
def test_rescoping_token(self):
- expires = self.token_data['token']['expires_at']
- auth_data = self.build_authentication_request(
- token=self.token,
- project_id=self.project_id)
- r = self.v3_authenticate_token(auth_data)
+ expires = self.v3_token_data['token']['expires_at']
+
+ # rescope the token
+ r = self.v3_authenticate_token(self.build_authentication_request(
+ token=self.v3_token,
+ project_id=self.project_id))
self.assertValidProjectScopedTokenResponse(r)
- # make sure expires stayed the same
+
+ # ensure token expiration stayed the same
self.assertEqual(expires, r.result['token']['expires_at'])
def test_check_token(self):
@@ -394,12 +412,13 @@ class TokenAPITests(object):
self.assertValidUnscopedTokenResponse(r)
def test_validate_token_nocatalog(self):
- auth_data = self.build_authentication_request(
+ v3_token = self.get_requested_token(self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
- project_id=self.project['id'])
- headers = {'X-Subject-Token': self.get_requested_token(auth_data)}
- r = self.get('/auth/tokens?nocatalog', headers=headers)
+ project_id=self.project['id']))
+ r = self.get(
+ '/auth/tokens?nocatalog',
+ headers={'X-Subject-Token': v3_token})
self.assertValidProjectScopedTokenResponse(r, require_catalog=False)
@@ -420,10 +439,10 @@ class AllowRescopeScopedTokenDisabledTests(test_v3.RestfulTestCase):
def _v2_token(self):
body = {
'auth': {
- "tenantId": self.project['id'],
+ "tenantId": self.default_domain_project['id'],
'passwordCredentials': {
- 'userId': self.user['id'],
- 'password': self.user['password']
+ 'userId': self.default_domain_user['id'],
+ 'password': self.default_domain_user['password']
}
}}
resp = self.admin_request(path='/v2.0/tokens',
@@ -462,7 +481,7 @@ class AllowRescopeScopedTokenDisabledTests(test_v3.RestfulTestCase):
def test_rescoped_domain_token_disabled(self):
self.domainA = self.new_domain_ref()
- self.assignment_api.create_domain(self.domainA['id'], self.domainA)
+ self.resource_api.create_domain(self.domainA['id'], self.domainA)
self.assignment_api.create_grant(self.role['id'],
user_id=self.user['id'],
domain_id=self.domainA['id'])
@@ -485,37 +504,77 @@ class AllowRescopeScopedTokenDisabledTests(test_v3.RestfulTestCase):
class TestPKITokenAPIs(test_v3.RestfulTestCase, TokenAPITests):
def config_overrides(self):
super(TestPKITokenAPIs, self).config_overrides()
- self.config_fixture.config(
- group='token',
- provider='keystone.token.providers.pki.Provider')
+ self.config_fixture.config(group='token', provider='pki')
def setUp(self):
super(TestPKITokenAPIs, self).setUp()
self.doSetUp()
+ def verify_token(self, *args, **kwargs):
+ return cms.verify_token(*args, **kwargs)
-class TestPKIZTokenAPIs(test_v3.RestfulTestCase, TokenAPITests):
+ def test_v3_token_id(self):
+ auth_data = self.build_authentication_request(
+ user_id=self.user['id'],
+ password=self.user['password'])
+ resp = self.v3_authenticate_token(auth_data)
+ token_data = resp.result
+ token_id = resp.headers.get('X-Subject-Token')
+ self.assertIn('expires_at', token_data['token'])
- def verify_token(self, *args, **kwargs):
- return cms.pkiz_verify(*args, **kwargs)
+ decoded_token = self.verify_token(token_id, CONF.signing.certfile,
+ CONF.signing.ca_certs)
+ decoded_token_dict = json.loads(decoded_token)
+
+ token_resp_dict = json.loads(resp.body)
+
+ self.assertEqual(decoded_token_dict, token_resp_dict)
+ # should be able to validate hash PKI token as well
+ hash_token_id = cms.cms_hash_token(token_id)
+ headers = {'X-Subject-Token': hash_token_id}
+ resp = self.get('/auth/tokens', headers=headers)
+ expected_token_data = resp.result
+ self.assertDictEqual(expected_token_data, token_data)
+ def test_v3_v2_hashed_pki_token_intermix(self):
+ auth_data = self.build_authentication_request(
+ user_id=self.default_domain_user['id'],
+ password=self.default_domain_user['password'],
+ project_id=self.default_domain_project['id'])
+ resp = self.v3_authenticate_token(auth_data)
+ token_data = resp.result
+ token = resp.headers.get('X-Subject-Token')
+
+ # should be able to validate a hash PKI token in v2 too
+ token = cms.cms_hash_token(token)
+ path = '/v2.0/tokens/%s' % (token)
+ resp = self.admin_request(path=path,
+ token=CONF.admin_token,
+ method='GET')
+ v2_token = resp.result
+ self.assertEqual(v2_token['access']['user']['id'],
+ token_data['token']['user']['id'])
+                # v2 token times have no sub-second precision, so
+                # just check that the non-fractional part agrees
+ self.assertIn(v2_token['access']['token']['expires'][:-1],
+ token_data['token']['expires_at'])
+ self.assertEqual(v2_token['access']['user']['roles'][0]['id'],
+ token_data['token']['roles'][0]['id'])
+
+
+class TestPKIZTokenAPIs(TestPKITokenAPIs):
def config_overrides(self):
super(TestPKIZTokenAPIs, self).config_overrides()
- self.config_fixture.config(
- group='token',
- provider='keystone.token.providers.pkiz.Provider')
+ self.config_fixture.config(group='token', provider='pkiz')
- def setUp(self):
- super(TestPKIZTokenAPIs, self).setUp()
- self.doSetUp()
+ def verify_token(self, *args, **kwargs):
+ return cms.pkiz_verify(*args, **kwargs)
class TestUUIDTokenAPIs(test_v3.RestfulTestCase, TokenAPITests):
def config_overrides(self):
super(TestUUIDTokenAPIs, self).config_overrides()
- self.config_fixture.config(
- group='token',
- provider='keystone.token.providers.uuid.Provider')
+ self.config_fixture.config(group='token', provider='uuid')
def setUp(self):
super(TestUUIDTokenAPIs, self).setUp()
@@ -531,10 +590,16 @@ class TestUUIDTokenAPIs(test_v3.RestfulTestCase, TokenAPITests):
self.assertIn('expires_at', token_data['token'])
self.assertFalse(cms.is_asn1_token(token_id))
- def test_v3_v2_hashed_pki_token_intermix(self):
- # this test is only applicable for PKI tokens
- # skipping it for UUID tokens
- pass
+
+class TestFernetTokenAPIs(test_v3.RestfulTestCase, TokenAPITests):
+ def config_overrides(self):
+ super(TestFernetTokenAPIs, self).config_overrides()
+ self.config_fixture.config(group='token', provider='fernet')
+ self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
+
+ def setUp(self):
+ super(TestFernetTokenAPIs, self).setUp()
+ self.doSetUp()
class TestTokenRevokeSelfAndAdmin(test_v3.RestfulTestCase):
@@ -675,12 +740,10 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
def config_overrides(self):
super(TestTokenRevokeById, self).config_overrides()
- self.config_fixture.config(
- group='revoke',
- driver='keystone.contrib.revoke.backends.kvs.Revoke')
+ self.config_fixture.config(group='revoke', driver='kvs')
self.config_fixture.config(
group='token',
- provider='keystone.token.providers.pki.Provider',
+ provider='pki',
revoke_by_id=False)
def setUp(self):
@@ -1069,7 +1132,7 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
- Delete the grant group1 has on ProjectA
- Check tokens for user1 & user2 are no longer valid,
since user1 and user2 are members of group1
- - Check token for user3 is still valid
+ - Check token for user3 is invalid too
"""
auth_data = self.build_authentication_request(
@@ -1112,10 +1175,11 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
self.head('/auth/tokens',
headers={'X-Subject-Token': token2},
expected_status=404)
- # But user3's token should still be valid
+        # But user3's token should be invalid too, since revocation applies
+        # to the scoped role & project
self.head('/auth/tokens',
headers={'X-Subject-Token': token3},
- expected_status=200)
+ expected_status=404)
def test_domain_group_role_assignment_maintains_token(self):
"""Test domain-group role assignment maintains existing token.
@@ -1202,6 +1266,14 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
def test_removing_role_assignment_does_not_affect_other_users(self):
"""Revoking a role from one user should not affect other users."""
+
+ # This group grant is not needed for the test
+ self.delete(
+ '/projects/%(project_id)s/groups/%(group_id)s/roles/%(role_id)s' %
+ {'project_id': self.projectA['id'],
+ 'group_id': self.group1['id'],
+ 'role_id': self.role1['id']})
+
user1_token = self.get_requested_token(
self.build_authentication_request(
user_id=self.user1['id'],
@@ -1220,12 +1292,6 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
'project_id': self.projectA['id'],
'user_id': self.user1['id'],
'role_id': self.role1['id']})
- self.delete(
- '/projects/%(project_id)s/groups/%(group_id)s/roles/%(role_id)s' %
- {'project_id': self.projectA['id'],
- 'group_id': self.group1['id'],
- 'role_id': self.role1['id']})
-
# authorization for the first user should now fail
self.head('/auth/tokens',
headers={'X-Subject-Token': user1_token},
@@ -1384,6 +1450,58 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
expected_status=200)
+class TestTokenRevokeByAssignment(TestTokenRevokeById):
+
+ def config_overrides(self):
+ super(TestTokenRevokeById, self).config_overrides()
+ self.config_fixture.config(
+ group='revoke',
+ driver='kvs')
+ self.config_fixture.config(
+ group='token',
+ provider='uuid',
+ revoke_by_id=True)
+
+ def test_removing_role_assignment_keeps_other_project_token_groups(self):
+ """Test assignment isolation.
+
+        Revoking a group role from one project should not invalidate all the
+        group users' tokens.
+ """
+ self.assignment_api.create_grant(self.role1['id'],
+ group_id=self.group1['id'],
+ project_id=self.projectB['id'])
+
+ project_token = self.get_requested_token(
+ self.build_authentication_request(
+ user_id=self.user1['id'],
+ password=self.user1['password'],
+ project_id=self.projectB['id']))
+
+ other_project_token = self.get_requested_token(
+ self.build_authentication_request(
+ user_id=self.user1['id'],
+ password=self.user1['password'],
+ project_id=self.projectA['id']))
+
+ self.assignment_api.delete_grant(self.role1['id'],
+ group_id=self.group1['id'],
+ project_id=self.projectB['id'])
+
+ # authorization for the projectA should still succeed
+ self.head('/auth/tokens',
+ headers={'X-Subject-Token': other_project_token},
+ expected_status=200)
+ # while token for the projectB should not
+ self.head('/auth/tokens',
+ headers={'X-Subject-Token': project_token},
+ expected_status=404)
+ revoked_tokens = [
+ t['id'] for t in self.token_provider_api.list_revoked_tokens()]
+ # token is in token revocation list
+ self.assertIn(project_token, revoked_tokens)
+
+
class TestTokenRevokeApi(TestTokenRevokeById):
EXTENSION_NAME = 'revoke'
EXTENSION_TO_ADD = 'revoke_extension'
@@ -1391,12 +1509,10 @@ class TestTokenRevokeApi(TestTokenRevokeById):
"""Test token revocation on the v3 Identity API."""
def config_overrides(self):
super(TestTokenRevokeApi, self).config_overrides()
- self.config_fixture.config(
- group='revoke',
- driver='keystone.contrib.revoke.backends.kvs.Revoke')
+ self.config_fixture.config(group='revoke', driver='kvs')
self.config_fixture.config(
group='token',
- provider='keystone.token.providers.pki.Provider',
+ provider='pki',
revoke_by_id=False)
def assertValidDeletedProjectResponse(self, events_response, project_id):
@@ -1424,7 +1540,7 @@ class TestTokenRevokeApi(TestTokenRevokeById):
def assertValidRevokedTokenResponse(self, events_response, **kwargs):
events = events_response['events']
self.assertEqual(1, len(events))
- for k, v in six.iteritems(kwargs):
+ for k, v in kwargs.items():
self.assertEqual(v, events[0].get(k))
self.assertIsNotNone(events[0]['issued_before'])
self.assertIsNotNone(events_response['links'])
@@ -1494,7 +1610,7 @@ class TestTokenRevokeApi(TestTokenRevokeById):
def assertEventDataInList(self, events, **kwargs):
found = False
for e in events:
- for key, value in six.iteritems(kwargs):
+ for key, value in kwargs.items():
try:
if e[key] != value:
break
@@ -1512,8 +1628,7 @@ class TestTokenRevokeApi(TestTokenRevokeById):
'find event with key-value pairs. Expected: '
'"%(expected)s" Events: "%(events)s"' %
{'expected': ','.join(
- ["'%s=%s'" % (k, v) for k, v in six.iteritems(
- kwargs)]),
+ ["'%s=%s'" % (k, v) for k, v in kwargs.items()]),
'events': events})
def test_list_delete_token_shows_in_event_list(self):
@@ -1569,8 +1684,8 @@ class TestTokenRevokeApi(TestTokenRevokeById):
expected_status=200).json_body['events']
self.assertEqual(2, len(events))
- future = timeutils.isotime(timeutils.utcnow() +
- datetime.timedelta(seconds=1000))
+ future = utils.isotime(timeutils.utcnow() +
+ datetime.timedelta(seconds=1000))
events = self.get('/OS-REVOKE/events?since=%s' % (future),
expected_status=200).json_body['events']
@@ -1596,148 +1711,116 @@ class TestAuthExternalDisabled(test_v3.RestfulTestCase):
auth_context)
-class TestAuthExternalLegacyDefaultDomain(test_v3.RestfulTestCase):
- content_type = 'json'
-
- def config_overrides(self):
- super(TestAuthExternalLegacyDefaultDomain, self).config_overrides()
- self.auth_plugin_config_override(
- methods=['external', 'password', 'token'],
- external='keystone.auth.plugins.external.LegacyDefaultDomain',
- password='keystone.auth.plugins.password.Password',
- token='keystone.auth.plugins.token.Token')
-
- def test_remote_user_no_realm(self):
- self.config_fixture.config(group='auth', methods='external')
- api = auth.controllers.Auth()
- context, auth_info, auth_context = self.build_external_auth_request(
- self.default_domain_user['name'])
- api.authenticate(context, auth_info, auth_context)
- self.assertEqual(auth_context['user_id'],
- self.default_domain_user['id'])
-
- def test_remote_user_no_domain(self):
- api = auth.controllers.Auth()
- context, auth_info, auth_context = self.build_external_auth_request(
- self.user['name'])
- self.assertRaises(exception.Unauthorized,
- api.authenticate,
- context,
- auth_info,
- auth_context)
-
-
-class TestAuthExternalLegacyDomain(test_v3.RestfulTestCase):
+class TestAuthExternalDomain(test_v3.RestfulTestCase):
content_type = 'json'
def config_overrides(self):
- super(TestAuthExternalLegacyDomain, self).config_overrides()
- self.auth_plugin_config_override(
- methods=['external', 'password', 'token'],
- external='keystone.auth.plugins.external.LegacyDomain',
- password='keystone.auth.plugins.password.Password',
- token='keystone.auth.plugins.token.Token')
+ super(TestAuthExternalDomain, self).config_overrides()
+ self.kerberos = False
+ self.auth_plugin_config_override(external='Domain')
def test_remote_user_with_realm(self):
api = auth.controllers.Auth()
- remote_user = '%s@%s' % (self.user['name'], self.domain['name'])
+ remote_user = self.user['name']
+ remote_domain = self.domain['name']
context, auth_info, auth_context = self.build_external_auth_request(
- remote_user)
+ remote_user, remote_domain=remote_domain, kerberos=self.kerberos)
api.authenticate(context, auth_info, auth_context)
- self.assertEqual(auth_context['user_id'], self.user['id'])
+ self.assertEqual(self.user['id'], auth_context['user_id'])
# Now test to make sure the user name can, itself, contain the
# '@' character.
user = {'name': 'myname@mydivision'}
self.identity_api.update_user(self.user['id'], user)
- remote_user = '%s@%s' % (user['name'], self.domain['name'])
+ remote_user = user['name']
context, auth_info, auth_context = self.build_external_auth_request(
- remote_user)
+ remote_user, remote_domain=remote_domain, kerberos=self.kerberos)
api.authenticate(context, auth_info, auth_context)
- self.assertEqual(auth_context['user_id'], self.user['id'])
+ self.assertEqual(self.user['id'], auth_context['user_id'])
def test_project_id_scoped_with_remote_user(self):
self.config_fixture.config(group='token', bind=['kerberos'])
auth_data = self.build_authentication_request(
- project_id=self.project['id'])
- remote_user = '%s@%s' % (self.user['name'], self.domain['name'])
+ project_id=self.project['id'],
+ kerberos=self.kerberos)
+ remote_user = self.user['name']
+ remote_domain = self.domain['name']
self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
+ 'REMOTE_DOMAIN': remote_domain,
'AUTH_TYPE': 'Negotiate'})
r = self.v3_authenticate_token(auth_data)
token = self.assertValidProjectScopedTokenResponse(r)
- self.assertEqual(token['bind']['kerberos'], self.user['name'])
+ self.assertEqual(self.user['name'], token['bind']['kerberos'])
def test_unscoped_bind_with_remote_user(self):
self.config_fixture.config(group='token', bind=['kerberos'])
- auth_data = self.build_authentication_request()
- remote_user = '%s@%s' % (self.user['name'], self.domain['name'])
+ auth_data = self.build_authentication_request(kerberos=self.kerberos)
+ remote_user = self.user['name']
+ remote_domain = self.domain['name']
self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
+ 'REMOTE_DOMAIN': remote_domain,
'AUTH_TYPE': 'Negotiate'})
r = self.v3_authenticate_token(auth_data)
token = self.assertValidUnscopedTokenResponse(r)
- self.assertEqual(token['bind']['kerberos'], self.user['name'])
+ self.assertEqual(self.user['name'], token['bind']['kerberos'])
-class TestAuthExternalDomain(test_v3.RestfulTestCase):
+class TestAuthExternalDefaultDomain(test_v3.RestfulTestCase):
content_type = 'json'
def config_overrides(self):
- super(TestAuthExternalDomain, self).config_overrides()
+ super(TestAuthExternalDefaultDomain, self).config_overrides()
self.kerberos = False
self.auth_plugin_config_override(
- methods=['external', 'password', 'token'],
- external='keystone.auth.plugins.external.Domain',
- password='keystone.auth.plugins.password.Password',
- token='keystone.auth.plugins.token.Token')
+ external='keystone.auth.plugins.external.DefaultDomain')
- def test_remote_user_with_realm(self):
+ def test_remote_user_with_default_domain(self):
api = auth.controllers.Auth()
- remote_user = self.user['name']
- remote_domain = self.domain['name']
+ remote_user = self.default_domain_user['name']
context, auth_info, auth_context = self.build_external_auth_request(
- remote_user, remote_domain=remote_domain, kerberos=self.kerberos)
+ remote_user, kerberos=self.kerberos)
api.authenticate(context, auth_info, auth_context)
- self.assertEqual(auth_context['user_id'], self.user['id'])
+ self.assertEqual(self.default_domain_user['id'],
+ auth_context['user_id'])
# Now test to make sure the user name can, itself, contain the
# '@' character.
user = {'name': 'myname@mydivision'}
- self.identity_api.update_user(self.user['id'], user)
+ self.identity_api.update_user(self.default_domain_user['id'], user)
remote_user = user['name']
context, auth_info, auth_context = self.build_external_auth_request(
- remote_user, remote_domain=remote_domain, kerberos=self.kerberos)
+ remote_user, kerberos=self.kerberos)
api.authenticate(context, auth_info, auth_context)
- self.assertEqual(auth_context['user_id'], self.user['id'])
+ self.assertEqual(self.default_domain_user['id'],
+ auth_context['user_id'])
def test_project_id_scoped_with_remote_user(self):
self.config_fixture.config(group='token', bind=['kerberos'])
auth_data = self.build_authentication_request(
- project_id=self.project['id'],
+ project_id=self.default_domain_project['id'],
kerberos=self.kerberos)
- remote_user = self.user['name']
- remote_domain = self.domain['name']
+ remote_user = self.default_domain_user['name']
self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
- 'REMOTE_DOMAIN': remote_domain,
'AUTH_TYPE': 'Negotiate'})
r = self.v3_authenticate_token(auth_data)
token = self.assertValidProjectScopedTokenResponse(r)
- self.assertEqual(token['bind']['kerberos'], self.user['name'])
+ self.assertEqual(self.default_domain_user['name'],
+ token['bind']['kerberos'])
def test_unscoped_bind_with_remote_user(self):
self.config_fixture.config(group='token', bind=['kerberos'])
auth_data = self.build_authentication_request(kerberos=self.kerberos)
- remote_user = self.user['name']
- remote_domain = self.domain['name']
+ remote_user = self.default_domain_user['name']
self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
- 'REMOTE_DOMAIN': remote_domain,
'AUTH_TYPE': 'Negotiate'})
r = self.v3_authenticate_token(auth_data)
token = self.assertValidUnscopedTokenResponse(r)
- self.assertEqual(token['bind']['kerberos'], self.user['name'])
+ self.assertEqual(self.default_domain_user['name'],
+ token['bind']['kerberos'])
class TestAuthKerberos(TestAuthExternalDomain):
@@ -1746,10 +1829,7 @@ class TestAuthKerberos(TestAuthExternalDomain):
super(TestAuthKerberos, self).config_overrides()
self.kerberos = True
self.auth_plugin_config_override(
- methods=['kerberos', 'password', 'token'],
- kerberos='keystone.auth.plugins.external.KerberosDomain',
- password='keystone.auth.plugins.password.Password',
- token='keystone.auth.plugins.token.Token')
+ methods=['kerberos', 'password', 'token'])
class TestAuth(test_v3.RestfulTestCase):
@@ -1815,7 +1895,7 @@ class TestAuth(test_v3.RestfulTestCase):
password=self.user['password'])
r = self.v3_authenticate_token(auth_data)
self.assertValidProjectScopedTokenResponse(r)
- self.assertEqual(r.result['token']['project']['id'], project['id'])
+ self.assertEqual(project['id'], r.result['token']['project']['id'])
def test_default_project_id_scoped_token_with_user_id_no_catalog(self):
project = self._second_project_as_default()
@@ -1826,7 +1906,7 @@ class TestAuth(test_v3.RestfulTestCase):
password=self.user['password'])
r = self.post('/auth/tokens?nocatalog', body=auth_data, noauth=True)
self.assertValidProjectScopedTokenResponse(r, require_catalog=False)
- self.assertEqual(r.result['token']['project']['id'], project['id'])
+ self.assertEqual(project['id'], r.result['token']['project']['id'])
def test_explicit_unscoped_token(self):
self._second_project_as_default()
@@ -1850,8 +1930,8 @@ class TestAuth(test_v3.RestfulTestCase):
project_id=self.project['id'])
r = self.post('/auth/tokens?nocatalog', body=auth_data, noauth=True)
self.assertValidProjectScopedTokenResponse(r, require_catalog=False)
- self.assertEqual(r.result['token']['project']['id'],
- self.project['id'])
+ self.assertEqual(self.project['id'],
+ r.result['token']['project']['id'])
def test_auth_catalog_attributes(self):
auth_data = self.build_authentication_request(
@@ -2345,13 +2425,12 @@ class TestAuth(test_v3.RestfulTestCase):
self.v3_authenticate_token(auth_data, expected_status=401)
def test_remote_user_no_realm(self):
- CONF.auth.methods = 'external'
api = auth.controllers.Auth()
context, auth_info, auth_context = self.build_external_auth_request(
self.default_domain_user['name'])
api.authenticate(context, auth_info, auth_context)
- self.assertEqual(auth_context['user_id'],
- self.default_domain_user['id'])
+ self.assertEqual(self.default_domain_user['id'],
+ auth_context['user_id'])
# Now test to make sure the user name can, itself, contain the
# '@' character.
user = {'name': 'myname@mydivision'}
@@ -2359,8 +2438,8 @@ class TestAuth(test_v3.RestfulTestCase):
context, auth_info, auth_context = self.build_external_auth_request(
user["name"])
api.authenticate(context, auth_info, auth_context)
- self.assertEqual(auth_context['user_id'],
- self.default_domain_user['id'])
+ self.assertEqual(self.default_domain_user['id'],
+ auth_context['user_id'])
def test_remote_user_no_domain(self):
api = auth.controllers.Auth()
@@ -2441,8 +2520,8 @@ class TestAuth(test_v3.RestfulTestCase):
headers = {'X-Subject-Token': token}
r = self.get('/auth/tokens', headers=headers, token=token)
token = self.assertValidProjectScopedTokenResponse(r)
- self.assertEqual(token['bind']['kerberos'],
- self.default_domain_user['name'])
+ self.assertEqual(self.default_domain_user['name'],
+ token['bind']['kerberos'])
def test_auth_with_bind_token(self):
self.config_fixture.config(group='token', bind=['kerberos'])
@@ -2455,7 +2534,7 @@ class TestAuth(test_v3.RestfulTestCase):
# the unscoped token should have bind information in it
token = self.assertValidUnscopedTokenResponse(r)
- self.assertEqual(token['bind']['kerberos'], remote_user)
+ self.assertEqual(remote_user, token['bind']['kerberos'])
token = r.headers.get('X-Subject-Token')
@@ -2466,7 +2545,7 @@ class TestAuth(test_v3.RestfulTestCase):
token = self.assertValidProjectScopedTokenResponse(r)
# the bind information should be carried over from the original token
- self.assertEqual(token['bind']['kerberos'], remote_user)
+ self.assertEqual(remote_user, token['bind']['kerberos'])
def test_v2_v3_bind_token_intermix(self):
self.config_fixture.config(group='token', bind='kerberos')
@@ -2484,7 +2563,7 @@ class TestAuth(test_v3.RestfulTestCase):
v2_token_data = resp.result
bind = v2_token_data['access']['token']['bind']
- self.assertEqual(bind['kerberos'], self.default_domain_user['name'])
+ self.assertEqual(self.default_domain_user['name'], bind['kerberos'])
v2_token_id = v2_token_data['access']['token']['id']
# NOTE(gyee): self.get() will try to obtain an auth token if one
@@ -2613,12 +2692,8 @@ class TestAuth(test_v3.RestfulTestCase):
class TestAuthJSONExternal(test_v3.RestfulTestCase):
content_type = 'json'
- def config_overrides(self):
- super(TestAuthJSONExternal, self).config_overrides()
- self.config_fixture.config(group='auth', methods='')
-
def auth_plugin_config_override(self, methods=None, **method_classes):
- self.config_fixture.config(group='auth', methods='')
+ self.config_fixture.config(group='auth', methods=[])
def test_remote_user_no_method(self):
api = auth.controllers.Auth()
@@ -2787,12 +2862,12 @@ class TestTrustRedelegation(test_v3.RestfulTestCase):
self.post('/OS-TRUST/trusts',
body={'trust': self.chained_trust_ref},
token=trust_token,
- expected_status=403)
+ expected_status=400)
def test_roles_subset(self):
# Build second role
role = self.new_role_ref()
- self.assignment_api.create_role(role['id'], role)
+ self.role_api.create_role(role['id'], role)
# assign a new role to the user
self.assignment_api.create_grant(role_id=role['id'],
user_id=self.user_id,
@@ -2860,7 +2935,7 @@ class TestTrustRedelegation(test_v3.RestfulTestCase):
# Build second trust with a role not in parent's roles
role = self.new_role_ref()
- self.assignment_api.create_role(role['id'], role)
+ self.role_api.create_role(role['id'], role)
# assign a new role to the user
self.assignment_api.create_grant(role_id=role['id'],
user_id=self.user_id,
@@ -2895,7 +2970,7 @@ class TestTrustRedelegation(test_v3.RestfulTestCase):
# Check that allow_redelegation == False caused redelegation_count
# to be set to 0, while allow_redelegation is removed
self.assertNotIn('allow_redelegation', trust)
- self.assertEqual(trust['redelegation_count'], 0)
+ self.assertEqual(0, trust['redelegation_count'])
trust_token = self._get_trust_token(trust)
# Build third trust, same as second
@@ -2921,7 +2996,7 @@ class TestTrustChain(test_v3.RestfulTestCase):
# Create trust chain
self.user_chain = list()
self.trust_chain = list()
- for _ in xrange(3):
+ for _ in range(3):
user_ref = self.new_user_ref(domain_id=self.domain_id)
user = self.identity_api.create_user(user_ref)
user['password'] = user_ref['password']
@@ -3067,12 +3142,10 @@ class TestTrustAuth(test_v3.RestfulTestCase):
def config_overrides(self):
super(TestTrustAuth, self).config_overrides()
- self.config_fixture.config(
- group='revoke',
- driver='keystone.contrib.revoke.backends.kvs.Revoke')
+ self.config_fixture.config(group='revoke', driver='kvs')
self.config_fixture.config(
group='token',
- provider='keystone.token.providers.pki.Provider',
+ provider='pki',
revoke_by_id=False)
self.config_fixture.config(group='trust', enabled=True)
@@ -3139,7 +3212,7 @@ class TestTrustAuth(test_v3.RestfulTestCase):
expected_status=200)
trust = r.result.get('trust')
self.assertIsNotNone(trust)
- self.assertEqual(trust['remaining_uses'], 1)
+ self.assertEqual(1, trust['remaining_uses'])
def test_create_one_time_use_trust(self):
trust = self._initialize_test_consume_trust(1)
@@ -3320,26 +3393,6 @@ class TestTrustAuth(test_v3.RestfulTestCase):
role_names=[uuid.uuid4().hex])
self.post('/OS-TRUST/trusts', body={'trust': ref}, expected_status=404)
- def test_create_expired_trust(self):
- ref = self.new_trust_ref(
- trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
- project_id=self.project_id,
- expires=dict(seconds=-1),
- role_ids=[self.role_id])
- r = self.post('/OS-TRUST/trusts', body={'trust': ref})
- trust = self.assertValidTrustResponse(r, ref)
-
- self.get('/OS-TRUST/trusts/%(trust_id)s' % {
- 'trust_id': trust['id']},
- expected_status=404)
-
- auth_data = self.build_authentication_request(
- user_id=self.trustee_user['id'],
- password=self.trustee_user['password'],
- trust_id=trust['id'])
- self.v3_authenticate_token(auth_data, expected_status=401)
-
def test_v3_v2_intermix_trustor_not_in_default_domain_failed(self):
ref = self.new_trust_ref(
trustor_user_id=self.user_id,
@@ -3365,7 +3418,8 @@ class TestTrustAuth(test_v3.RestfulTestCase):
# now validate the v3 token with v2 API
path = '/v2.0/tokens/%s' % (token)
self.admin_request(
- path=path, token='ADMIN', method='GET', expected_status=401)
+ path=path, token=CONF.admin_token,
+ method='GET', expected_status=401)
def test_v3_v2_intermix_trustor_not_in_default_domaini_failed(self):
ref = self.new_trust_ref(
@@ -3397,7 +3451,8 @@ class TestTrustAuth(test_v3.RestfulTestCase):
# now validate the v3 token with v2 API
path = '/v2.0/tokens/%s' % (token)
self.admin_request(
- path=path, token='ADMIN', method='GET', expected_status=401)
+ path=path, token=CONF.admin_token,
+ method='GET', expected_status=401)
def test_v3_v2_intermix_project_not_in_default_domaini_failed(self):
# create a trustee in default domain to delegate stuff to
@@ -3436,7 +3491,8 @@ class TestTrustAuth(test_v3.RestfulTestCase):
# now validate the v3 token with v2 API
path = '/v2.0/tokens/%s' % (token)
self.admin_request(
- path=path, token='ADMIN', method='GET', expected_status=401)
+ path=path, token=CONF.admin_token,
+ method='GET', expected_status=401)
def test_v3_v2_intermix(self):
# create a trustee in default domain to delegate stuff to
@@ -3474,7 +3530,8 @@ class TestTrustAuth(test_v3.RestfulTestCase):
# now validate the v3 token with v2 API
path = '/v2.0/tokens/%s' % (token)
self.admin_request(
- path=path, token='ADMIN', method='GET', expected_status=200)
+ path=path, token=CONF.admin_token,
+ method='GET', expected_status=200)
def test_exercise_trust_scoped_token_without_impersonation(self):
ref = self.new_trust_ref(
@@ -3494,18 +3551,18 @@ class TestTrustAuth(test_v3.RestfulTestCase):
trust_id=trust['id'])
r = self.v3_authenticate_token(auth_data)
self.assertValidProjectTrustScopedTokenResponse(r, self.trustee_user)
- self.assertEqual(r.result['token']['user']['id'],
- self.trustee_user['id'])
- self.assertEqual(r.result['token']['user']['name'],
- self.trustee_user['name'])
- self.assertEqual(r.result['token']['user']['domain']['id'],
- self.domain['id'])
- self.assertEqual(r.result['token']['user']['domain']['name'],
- self.domain['name'])
- self.assertEqual(r.result['token']['project']['id'],
- self.project['id'])
- self.assertEqual(r.result['token']['project']['name'],
- self.project['name'])
+ self.assertEqual(self.trustee_user['id'],
+ r.result['token']['user']['id'])
+ self.assertEqual(self.trustee_user['name'],
+ r.result['token']['user']['name'])
+ self.assertEqual(self.domain['id'],
+ r.result['token']['user']['domain']['id'])
+ self.assertEqual(self.domain['name'],
+ r.result['token']['user']['domain']['name'])
+ self.assertEqual(self.project['id'],
+ r.result['token']['project']['id'])
+ self.assertEqual(self.project['name'],
+ r.result['token']['project']['name'])
def test_exercise_trust_scoped_token_with_impersonation(self):
ref = self.new_trust_ref(
@@ -3525,16 +3582,16 @@ class TestTrustAuth(test_v3.RestfulTestCase):
trust_id=trust['id'])
r = self.v3_authenticate_token(auth_data)
self.assertValidProjectTrustScopedTokenResponse(r, self.user)
- self.assertEqual(r.result['token']['user']['id'], self.user['id'])
- self.assertEqual(r.result['token']['user']['name'], self.user['name'])
- self.assertEqual(r.result['token']['user']['domain']['id'],
- self.domain['id'])
- self.assertEqual(r.result['token']['user']['domain']['name'],
- self.domain['name'])
- self.assertEqual(r.result['token']['project']['id'],
- self.project['id'])
- self.assertEqual(r.result['token']['project']['name'],
- self.project['name'])
+ self.assertEqual(self.user['id'], r.result['token']['user']['id'])
+ self.assertEqual(self.user['name'], r.result['token']['user']['name'])
+ self.assertEqual(self.domain['id'],
+ r.result['token']['user']['domain']['id'])
+ self.assertEqual(self.domain['name'],
+ r.result['token']['user']['domain']['name'])
+ self.assertEqual(self.project['id'],
+ r.result['token']['project']['id'])
+ self.assertEqual(self.project['name'],
+ r.result['token']['project']['name'])
def test_impersonation_token_cannot_create_new_trust(self):
ref = self.new_trust_ref(
@@ -3950,9 +4007,9 @@ class TestAuthContext(tests.TestCase):
self.auth_context = auth.controllers.AuthContext()
def test_pick_lowest_expires_at(self):
- expires_at_1 = timeutils.isotime(timeutils.utcnow())
- expires_at_2 = timeutils.isotime(timeutils.utcnow() +
- datetime.timedelta(seconds=10))
+ expires_at_1 = utils.isotime(timeutils.utcnow())
+ expires_at_2 = utils.isotime(timeutils.utcnow() +
+ datetime.timedelta(seconds=10))
# make sure auth_context picks the lowest value
self.auth_context['expires_at'] = expires_at_1
self.auth_context['expires_at'] = expires_at_2
@@ -4113,7 +4170,7 @@ class TestFernetTokenProvider(test_v3.RestfulTestCase):
trustor_user_id=self.user_id,
trustee_user_id=trustee_user['id'],
project_id=self.project_id,
- impersonation=True,
+ impersonation=False,
role_ids=[self.role_id])
# Create a trust
@@ -4123,9 +4180,7 @@ class TestFernetTokenProvider(test_v3.RestfulTestCase):
def config_overrides(self):
super(TestFernetTokenProvider, self).config_overrides()
- self.config_fixture.config(
- group='token',
- provider='keystone.token.providers.fernet.Provider')
+ self.config_fixture.config(group='token', provider='fernet')
def test_validate_unscoped_token(self):
unscoped_token = self._get_unscoped_token()
@@ -4135,7 +4190,7 @@ class TestFernetTokenProvider(test_v3.RestfulTestCase):
unscoped_token = self._get_unscoped_token()
tampered_token = (unscoped_token[:50] + uuid.uuid4().hex +
unscoped_token[50 + 32:])
- self._validate_token(tampered_token, expected_status=401)
+ self._validate_token(tampered_token, expected_status=404)
def test_revoke_unscoped_token(self):
unscoped_token = self._get_unscoped_token()
@@ -4215,7 +4270,7 @@ class TestFernetTokenProvider(test_v3.RestfulTestCase):
project_scoped_token = self._get_project_scoped_token()
tampered_token = (project_scoped_token[:50] + uuid.uuid4().hex +
project_scoped_token[50 + 32:])
- self._validate_token(tampered_token, expected_status=401)
+ self._validate_token(tampered_token, expected_status=404)
def test_revoke_project_scoped_token(self):
project_scoped_token = self._get_project_scoped_token()
@@ -4323,7 +4378,7 @@ class TestFernetTokenProvider(test_v3.RestfulTestCase):
# Get a trust scoped token
tampered_token = (trust_scoped_token[:50] + uuid.uuid4().hex +
trust_scoped_token[50 + 32:])
- self._validate_token(tampered_token, expected_status=401)
+ self._validate_token(tampered_token, expected_status=404)
def test_revoke_trust_scoped_token(self):
trustee_user, trust = self._create_trust()
@@ -4454,9 +4509,7 @@ class TestAuthFernetTokenProvider(TestAuth):
def config_overrides(self):
super(TestAuthFernetTokenProvider, self).config_overrides()
- self.config_fixture.config(
- group='token',
- provider='keystone.token.providers.fernet.Provider')
+ self.config_fixture.config(group='token', provider='fernet')
def test_verify_with_bound_token(self):
self.config_fixture.config(group='token', bind='kerberos')
diff --git a/keystone-moon/keystone/tests/unit/test_v3_catalog.py b/keystone-moon/keystone/tests/unit/test_v3_catalog.py
index d231b2e1..f96b2a12 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_catalog.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_catalog.py
@@ -15,6 +15,8 @@
import copy
import uuid
+from testtools import matchers
+
from keystone import catalog
from keystone.tests import unit as tests
from keystone.tests.unit.ksfixtures import database
@@ -154,7 +156,7 @@ class CatalogTestCase(test_v3.RestfulTestCase):
ref2 = self.new_region_ref()
del ref1['description']
- del ref2['description']
+ ref2['description'] = None
resp1 = self.post(
'/regions',
@@ -224,6 +226,39 @@ class CatalogTestCase(test_v3.RestfulTestCase):
body={'region': region})
self.assertValidRegionResponse(r, region)
+ def test_update_region_without_description_keeps_original(self):
+ """Call ``PATCH /regions/{region_id}``."""
+ region_ref = self.new_region_ref()
+
+ resp = self.post('/regions', body={'region': region_ref},
+ expected_status=201)
+
+ region_updates = {
+ # update with something that's not the description
+ 'parent_region_id': self.region_id,
+ }
+ resp = self.patch('/regions/%s' % region_ref['id'],
+ body={'region': region_updates},
+ expected_status=200)
+
+ # NOTE(dstanek): Keystone should keep the original description.
+ self.assertEqual(region_ref['description'],
+ resp.result['region']['description'])
+
+ def test_update_region_with_null_description(self):
+ """Call ``PATCH /regions/{region_id}``."""
+ region = self.new_region_ref()
+ del region['id']
+ region['description'] = None
+ r = self.patch('/regions/%(region_id)s' % {
+ 'region_id': self.region_id},
+ body={'region': region})
+
+ # NOTE(dstanek): Keystone should turn the provided None value into
+ # an empty string before storing in the backend.
+ region['description'] = ''
+ self.assertValidRegionResponse(r, region)
+
def test_delete_region(self):
"""Call ``DELETE /regions/{region_id}``."""
@@ -379,6 +414,133 @@ class CatalogTestCase(test_v3.RestfulTestCase):
r = self.get('/endpoints')
self.assertValidEndpointListResponse(r, ref=self.endpoint)
+ def _create_random_endpoint(self, interface='public',
+ parent_region_id=None):
+ region = self._create_region_with_parent_id(
+ parent_id=parent_region_id)
+ service = self._create_random_service()
+ ref = self.new_endpoint_ref(
+ service_id=service['id'],
+ interface=interface,
+ region_id=region.result['region']['id'])
+
+ response = self.post(
+ '/endpoints',
+ body={'endpoint': ref})
+ return response.json['endpoint']
+
+ def test_list_endpoints_filtered_by_interface(self):
+ """Call ``GET /endpoints?interface={interface}``."""
+ ref = self._create_random_endpoint(interface='internal')
+
+ response = self.get('/endpoints?interface=%s' % ref['interface'])
+ self.assertValidEndpointListResponse(response, ref=ref)
+
+ for endpoint in response.json['endpoints']:
+ self.assertEqual(ref['interface'], endpoint['interface'])
+
+ def test_list_endpoints_filtered_by_service_id(self):
+ """Call ``GET /endpoints?service_id={service_id}``."""
+ ref = self._create_random_endpoint()
+
+ response = self.get('/endpoints?service_id=%s' % ref['service_id'])
+ self.assertValidEndpointListResponse(response, ref=ref)
+
+ for endpoint in response.json['endpoints']:
+ self.assertEqual(ref['service_id'], endpoint['service_id'])
+
+ def test_list_endpoints_filtered_by_region_id(self):
+ """Call ``GET /endpoints?region_id={region_id}``."""
+ ref = self._create_random_endpoint()
+
+ response = self.get('/endpoints?region_id=%s' % ref['region_id'])
+ self.assertValidEndpointListResponse(response, ref=ref)
+
+ for endpoint in response.json['endpoints']:
+ self.assertEqual(ref['region_id'], endpoint['region_id'])
+
+ def test_list_endpoints_filtered_by_parent_region_id(self):
+ """Call ``GET /endpoints?region_id={region_id}``.
+
+ Ensure passing the parent_region_id as filter returns an
+ empty list.
+
+ """
+ parent_region = self._create_region_with_parent_id()
+ parent_region_id = parent_region.result['region']['id']
+ self._create_random_endpoint(parent_region_id=parent_region_id)
+
+ response = self.get('/endpoints?region_id=%s' % parent_region_id)
+ self.assertEqual(0, len(response.json['endpoints']))
+
+ def test_list_endpoints_with_multiple_filters(self):
+ """Call ``GET /endpoints?interface={interface}...``.
+
+ Ensure passing different combinations of interface, region_id and
+ service_id as filters will return the correct result.
+
+ """
+ # interface and region_id specified
+ ref = self._create_random_endpoint(interface='internal')
+ response = self.get('/endpoints?interface=%s&region_id=%s' %
+ (ref['interface'], ref['region_id']))
+ self.assertValidEndpointListResponse(response, ref=ref)
+
+ for endpoint in response.json['endpoints']:
+ self.assertEqual(ref['interface'], endpoint['interface'])
+ self.assertEqual(ref['region_id'], endpoint['region_id'])
+
+ # interface and service_id specified
+ ref = self._create_random_endpoint(interface='internal')
+ response = self.get('/endpoints?interface=%s&service_id=%s' %
+ (ref['interface'], ref['service_id']))
+ self.assertValidEndpointListResponse(response, ref=ref)
+
+ for endpoint in response.json['endpoints']:
+ self.assertEqual(ref['interface'], endpoint['interface'])
+ self.assertEqual(ref['service_id'], endpoint['service_id'])
+
+ # region_id and service_id specified
+ ref = self._create_random_endpoint(interface='internal')
+ response = self.get('/endpoints?region_id=%s&service_id=%s' %
+ (ref['region_id'], ref['service_id']))
+ self.assertValidEndpointListResponse(response, ref=ref)
+
+ for endpoint in response.json['endpoints']:
+ self.assertEqual(ref['region_id'], endpoint['region_id'])
+ self.assertEqual(ref['service_id'], endpoint['service_id'])
+
+ # interface, region_id and service_id specified
+ ref = self._create_random_endpoint(interface='internal')
+ response = self.get(('/endpoints?interface=%s&region_id=%s'
+ '&service_id=%s') %
+ (ref['interface'], ref['region_id'],
+ ref['service_id']))
+ self.assertValidEndpointListResponse(response, ref=ref)
+
+ for endpoint in response.json['endpoints']:
+ self.assertEqual(ref['interface'], endpoint['interface'])
+ self.assertEqual(ref['region_id'], endpoint['region_id'])
+ self.assertEqual(ref['service_id'], endpoint['service_id'])
+
+ def test_list_endpoints_with_random_filter_values(self):
+ """Call ``GET /endpoints?interface={interface}...``.
+
+ Ensure passing random values for: interface, region_id and
+ service_id will return an empty list.
+
+ """
+ self._create_random_endpoint(interface='internal')
+
+ response = self.get('/endpoints?interface=%s' % uuid.uuid4().hex)
+ self.assertEqual(0, len(response.json['endpoints']))
+
+ response = self.get('/endpoints?region_id=%s' % uuid.uuid4().hex)
+ self.assertEqual(0, len(response.json['endpoints']))
+
+ response = self.get('/endpoints?service_id=%s' % uuid.uuid4().hex)
+ self.assertEqual(0, len(response.json['endpoints']))
+
def test_create_endpoint_no_enabled(self):
"""Call ``POST /endpoints``."""
ref = self.new_endpoint_ref(service_id=self.service_id)
@@ -582,6 +744,62 @@ class CatalogTestCase(test_v3.RestfulTestCase):
self.assertEqual(endpoint_v2['region'], endpoint_v3['region_id'])
+ def test_deleting_endpoint_with_space_in_url(self):
+ # create a v3 endpoint ref
+ ref = self.new_endpoint_ref(service_id=self.service['id'])
+
+ # add a space to all urls (intentional "i d" to test bug)
+ url_with_space = "http://127.0.0.1:8774 /v1.1/\$(tenant_i d)s"
+ ref['publicurl'] = url_with_space
+ ref['internalurl'] = url_with_space
+ ref['adminurl'] = url_with_space
+ ref['url'] = url_with_space
+
+ # add the endpoint to the database
+ self.catalog_api.create_endpoint(ref['id'], ref)
+
+ # delete the endpoint
+ self.delete('/endpoints/%s' % ref['id'])
+
+ # make sure it's deleted (GET should return 404)
+ self.get('/endpoints/%s' % ref['id'], expected_status=404)
+
+ def test_endpoint_create_with_valid_url(self):
+ """Create endpoint with valid url should be tested,too."""
+ # list one valid url is enough, no need to list too much
+ valid_url = 'http://127.0.0.1:8774/v1.1/$(tenant_id)s'
+
+ ref = self.new_endpoint_ref(self.service_id)
+ ref['url'] = valid_url
+ self.post('/endpoints',
+ body={'endpoint': ref},
+ expected_status=201)
+
+ def test_endpoint_create_with_invalid_url(self):
+ """Test the invalid cases: substitutions is not exactly right.
+ """
+ invalid_urls = [
+ # using a substitution that is not whitelisted - KeyError
+ 'http://127.0.0.1:8774/v1.1/$(nonexistent)s',
+
+ # invalid formatting - ValueError
+ 'http://127.0.0.1:8774/v1.1/$(tenant_id)',
+ 'http://127.0.0.1:8774/v1.1/$(tenant_id)t',
+ 'http://127.0.0.1:8774/v1.1/$(tenant_id',
+
+ # invalid type specifier - TypeError
+ # admin_url is a string not an int
+ 'http://127.0.0.1:8774/v1.1/$(admin_url)d',
+ ]
+
+ ref = self.new_endpoint_ref(self.service_id)
+
+ for invalid_url in invalid_urls:
+ ref['url'] = invalid_url
+ self.post('/endpoints',
+ body={'endpoint': ref},
+ expected_status=400)
+
class TestCatalogAPISQL(tests.TestCase):
"""Tests for the catalog Manager against the SQL backend.
@@ -602,9 +820,7 @@ class TestCatalogAPISQL(tests.TestCase):
def config_overrides(self):
super(TestCatalogAPISQL, self).config_overrides()
- self.config_fixture.config(
- group='catalog',
- driver='keystone.catalog.backends.sql.Catalog')
+ self.config_fixture.config(group='catalog', driver='sql')
def new_endpoint_ref(self, service_id):
return {
@@ -643,6 +859,20 @@ class TestCatalogAPISQL(tests.TestCase):
# all three appear in the backend
self.assertEqual(3, len(self.catalog_api.list_endpoints()))
+ # create another valid endpoint - tenant_id will be replaced
+ ref = self.new_endpoint_ref(self.service_id)
+ ref['url'] = 'http://keystone/%(tenant_id)s'
+ self.catalog_api.create_endpoint(ref['id'], ref)
+
+ # there are two valid endpoints, positive check
+ catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id)
+ self.assertThat(catalog[0]['endpoints'], matchers.HasLength(2))
+
+ # If the URL has no 'tenant_id' to substitute, we will skip the
+ # endpoint which contains this kind of URL, negative check.
+ catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id=None)
+ self.assertThat(catalog[0]['endpoints'], matchers.HasLength(1))
+
def test_get_catalog_always_returns_service_name(self):
user_id = uuid.uuid4().hex
tenant_id = uuid.uuid4().hex
@@ -691,9 +921,7 @@ class TestCatalogAPISQLRegions(tests.TestCase):
def config_overrides(self):
super(TestCatalogAPISQLRegions, self).config_overrides()
- self.config_fixture.config(
- group='catalog',
- driver='keystone.catalog.backends.sql.Catalog')
+ self.config_fixture.config(group='catalog', driver='sql')
def new_endpoint_ref(self, service_id):
return {
diff --git a/keystone-moon/keystone/tests/unit/test_v3_controller.py b/keystone-moon/keystone/tests/unit/test_v3_controller.py
index 3ac4ba5a..eef64a82 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_controller.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_controller.py
@@ -15,6 +15,7 @@
import uuid
import six
+from six.moves import range
from testtools import matchers
from keystone.common import controller
diff --git a/keystone-moon/keystone/tests/unit/test_v3_credential.py b/keystone-moon/keystone/tests/unit/test_v3_credential.py
index d792b216..f8f6d35b 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_credential.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_credential.py
@@ -18,6 +18,7 @@ import uuid
from keystoneclient.contrib.ec2 import utils as ec2_utils
from oslo_config import cfg
+from testtools import matchers
from keystone import exception
from keystone.tests.unit import test_v3
@@ -375,14 +376,17 @@ class TestCredentialEc2(CredentialBaseTestCase):
self.assertIsNone(ec2_cred['trust_id'])
self._validate_signature(access=ec2_cred['access'],
secret=ec2_cred['secret'])
-
- return ec2_cred
+ uri = '/'.join([self._get_ec2_cred_uri(), ec2_cred['access']])
+ self.assertThat(ec2_cred['links']['self'],
+ matchers.EndsWith(uri))
def test_ec2_get_credential(self):
ec2_cred = self._get_ec2_cred()
uri = '/'.join([self._get_ec2_cred_uri(), ec2_cred['access']])
r = self.get(uri)
self.assertDictEqual(ec2_cred, r.result['credential'])
+ self.assertThat(ec2_cred['links']['self'],
+ matchers.EndsWith(uri))
def test_ec2_list_credentials(self):
"""Test ec2 credential listing."""
@@ -391,6 +395,8 @@ class TestCredentialEc2(CredentialBaseTestCase):
r = self.get(uri)
cred_list = r.result['credentials']
self.assertEqual(1, len(cred_list))
+ self.assertThat(r.result['links']['self'],
+ matchers.EndsWith(uri))
def test_ec2_delete_credential(self):
"""Test ec2 credential deletion."""
diff --git a/keystone-moon/keystone/tests/unit/test_v3_endpoint_policy.py b/keystone-moon/keystone/tests/unit/test_v3_endpoint_policy.py
index 437fb155..4daeff4d 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_endpoint_policy.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_endpoint_policy.py
@@ -17,13 +17,7 @@ from testtools import matchers
from keystone.tests.unit import test_v3
-class TestExtensionCase(test_v3.RestfulTestCase):
-
- EXTENSION_NAME = 'endpoint_policy'
- EXTENSION_TO_ADD = 'endpoint_policy_extension'
-
-
-class EndpointPolicyTestCase(TestExtensionCase):
+class EndpointPolicyTestCase(test_v3.RestfulTestCase):
"""Test endpoint policy CRUD.
In general, the controller layer of the endpoint policy extension is really
@@ -203,7 +197,7 @@ class EndpointPolicyTestCase(TestExtensionCase):
self.head(url, expected_status=404)
-class JsonHomeTests(TestExtensionCase, test_v3.JsonHomeTestMixin):
+class JsonHomeTests(test_v3.JsonHomeTestMixin):
EXTENSION_LOCATION = ('http://docs.openstack.org/api/openstack-identity/3/'
'ext/OS-ENDPOINT-POLICY/1.0/rel')
PARAM_LOCATION = 'http://docs.openstack.org/api/openstack-identity/3/param'
diff --git a/keystone-moon/keystone/tests/unit/test_v3_federation.py b/keystone-moon/keystone/tests/unit/test_v3_federation.py
index 3b6f4d8b..e646bc0a 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_federation.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_federation.py
@@ -13,26 +13,27 @@
import os
import random
import subprocess
+from testtools import matchers
import uuid
+import fixtures
from lxml import etree
import mock
from oslo_config import cfg
from oslo_log import log
-from oslo_serialization import jsonutils
+from oslo_utils import importutils
from oslotest import mockpatch
import saml2
from saml2 import saml
from saml2 import sigver
-from six.moves import urllib
-import xmldsig
+from six.moves import range, urllib, zip
+xmldsig = importutils.try_import("saml2.xmldsig")
+if not xmldsig:
+ xmldsig = importutils.try_import("xmldsig")
from keystone.auth import controllers as auth_controllers
-from keystone.auth.plugins import mapped
-from keystone.contrib import federation
from keystone.contrib.federation import controllers as federation_controllers
from keystone.contrib.federation import idp as keystone_idp
-from keystone.contrib.federation import utils as mapping_utils
from keystone import exception
from keystone import notifications
from keystone.tests.unit import core
@@ -68,7 +69,7 @@ class FederatedSetupMixin(object):
USER = 'user@ORGANIZATION'
ASSERTION_PREFIX = 'PREFIX_'
IDP_WITH_REMOTE = 'ORG_IDP_REMOTE'
- REMOTE_ID = 'entityID_IDP'
+ REMOTE_IDS = ['entityID_IDP1', 'entityID_IDP2']
REMOTE_ID_ATTR = uuid.uuid4().hex
UNSCOPED_V3_SAML2_REQ = {
@@ -108,14 +109,14 @@ class FederatedSetupMixin(object):
self.assertEqual(token_projects, projects_ref)
def _check_scoped_token_attributes(self, token):
- def xor_project_domain(iterable):
- return sum(('project' in iterable, 'domain' in iterable)) % 2
+ def xor_project_domain(token_keys):
+ return sum(('project' in token_keys, 'domain' in token_keys)) % 2
for obj in ('user', 'catalog', 'expires_at', 'issued_at',
'methods', 'roles'):
self.assertIn(obj, token)
# Check for either project or domain
- if not xor_project_domain(token.keys()):
+ if not xor_project_domain(list(token.keys())):
raise AssertionError("You must specify either"
"project or domain.")
@@ -123,6 +124,10 @@ class FederatedSetupMixin(object):
os_federation = token['user']['OS-FEDERATION']
self.assertEqual(self.IDP, os_federation['identity_provider']['id'])
self.assertEqual(self.PROTOCOL, os_federation['protocol']['id'])
+ self.assertListEqual(sorted(['groups',
+ 'identity_provider',
+ 'protocol']),
+ sorted(os_federation.keys()))
def _issue_unscoped_token(self,
idp=None,
@@ -327,7 +332,8 @@ class FederatedSetupMixin(object):
},
{
'user': {
- 'name': '{0}'
+ 'name': '{0}',
+ 'id': '{1}'
}
}
],
@@ -336,6 +342,9 @@ class FederatedSetupMixin(object):
'type': 'UserName'
},
{
+ 'type': 'Email',
+ },
+ {
'type': 'orgPersonType',
'any_one_of': [
'Employee'
@@ -352,7 +361,8 @@ class FederatedSetupMixin(object):
},
{
'user': {
- 'name': '{0}'
+ 'name': '{0}',
+ 'id': '{1}'
}
}
],
@@ -361,6 +371,9 @@ class FederatedSetupMixin(object):
'type': self.ASSERTION_PREFIX + 'UserName'
},
{
+ 'type': self.ASSERTION_PREFIX + 'Email',
+ },
+ {
'type': self.ASSERTION_PREFIX + 'orgPersonType',
'any_one_of': [
'SuperEmployee'
@@ -377,7 +390,8 @@ class FederatedSetupMixin(object):
},
{
'user': {
- 'name': '{0}'
+ 'name': '{0}',
+ 'id': '{1}'
}
}
],
@@ -386,6 +400,9 @@ class FederatedSetupMixin(object):
'type': 'UserName'
},
{
+ 'type': 'Email'
+ },
+ {
'type': 'orgPersonType',
'any_one_of': [
'Customer'
@@ -413,7 +430,8 @@ class FederatedSetupMixin(object):
{
'user': {
- 'name': '{0}'
+ 'name': '{0}',
+ 'id': '{1}'
}
}
],
@@ -422,6 +440,9 @@ class FederatedSetupMixin(object):
'type': 'UserName'
},
{
+ 'type': 'Email'
+ },
+ {
'type': 'orgPersonType',
'any_one_of': [
'Admin',
@@ -444,7 +465,8 @@ class FederatedSetupMixin(object):
},
{
'user': {
- 'name': '{0}'
+ 'name': '{0}',
+ 'id': '{1}'
}
}
],
@@ -453,6 +475,9 @@ class FederatedSetupMixin(object):
'type': 'UserName',
},
{
+ 'type': 'Email',
+ },
+ {
'type': 'FirstName',
'any_one_of': [
'Jill'
@@ -475,7 +500,8 @@ class FederatedSetupMixin(object):
},
{
'user': {
- 'name': '{0}'
+ 'name': '{0}',
+ 'id': '{1}'
}
}
],
@@ -485,6 +511,9 @@ class FederatedSetupMixin(object):
},
{
'type': 'Email',
+ },
+ {
+ 'type': 'Email',
'any_one_of': [
'testacct@example.com'
]
@@ -502,7 +531,8 @@ class FederatedSetupMixin(object):
"local": [
{
'user': {
- 'name': '{0}'
+ 'name': '{0}',
+ 'id': '{1}'
}
},
{
@@ -519,6 +549,9 @@ class FederatedSetupMixin(object):
'type': 'UserName',
},
{
+ 'type': 'Email',
+ },
+ {
"type": "orgPersonType",
"any_one_of": [
"CEO",
@@ -531,7 +564,8 @@ class FederatedSetupMixin(object):
"local": [
{
'user': {
- 'name': '{0}'
+ 'name': '{0}',
+ 'id': '{1}'
}
},
{
@@ -548,6 +582,9 @@ class FederatedSetupMixin(object):
"type": "UserName",
},
{
+ "type": "Email",
+ },
+ {
"type": "orgPersonType",
"any_one_of": [
"Managers"
@@ -559,7 +596,8 @@ class FederatedSetupMixin(object):
"local": [
{
"user": {
- "name": "{0}"
+ "name": "{0}",
+ "id": "{1}"
}
},
{
@@ -576,6 +614,9 @@ class FederatedSetupMixin(object):
"type": "UserName",
},
{
+ "type": "Email",
+ },
+ {
"type": "UserName",
"any_one_of": [
"IamTester"
@@ -639,7 +680,7 @@ class FederatedSetupMixin(object):
self.idp)
# Add IDP with remote
self.idp_with_remote = self.idp_ref(id=self.IDP_WITH_REMOTE)
- self.idp_with_remote['remote_id'] = self.REMOTE_ID
+ self.idp_with_remote['remote_ids'] = self.REMOTE_IDS
self.federation_api.create_idp(self.idp_with_remote['id'],
self.idp_with_remote)
# Add a mapping
@@ -793,28 +834,137 @@ class FederatedIdentityProviderTests(FederationTests):
return r
def test_create_idp(self):
- """Creates the IdentityProvider entity."""
+ """Creates the IdentityProvider entity associated to remote_ids."""
- keys_to_check = self.idp_keys
- body = self._http_idp_input()
+ keys_to_check = list(self.idp_keys)
+ body = self.default_body.copy()
+ body['description'] = uuid.uuid4().hex
resp = self._create_default_idp(body=body)
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=body)
def test_create_idp_remote(self):
- """Creates the IdentityProvider entity associated to a remote_id."""
+ """Creates the IdentityProvider entity associated to remote_ids."""
keys_to_check = list(self.idp_keys)
- keys_to_check.append('remote_id')
+ keys_to_check.append('remote_ids')
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
- body['remote_id'] = uuid.uuid4().hex
+ body['remote_ids'] = [uuid.uuid4().hex,
+ uuid.uuid4().hex,
+ uuid.uuid4().hex]
resp = self._create_default_idp(body=body)
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=body)
+ def test_create_idp_remote_repeated(self):
+ """Creates two IdentityProvider entities with some remote_ids
+
+ A remote_id is the same for both so the second IdP is not
+ created because of the uniqueness of the remote_ids
+
+ Expect HTTP 409 code for the latter call.
+
+ """
+
+ body = self.default_body.copy()
+ repeated_remote_id = uuid.uuid4().hex
+ body['remote_ids'] = [uuid.uuid4().hex,
+ uuid.uuid4().hex,
+ uuid.uuid4().hex,
+ repeated_remote_id]
+ self._create_default_idp(body=body)
+
+ url = self.base_url(suffix=uuid.uuid4().hex)
+ body['remote_ids'] = [uuid.uuid4().hex,
+ repeated_remote_id]
+ self.put(url, body={'identity_provider': body},
+ expected_status=409)
+
+ def test_create_idp_remote_empty(self):
+ """Creates an IdP with empty remote_ids."""
+
+ keys_to_check = list(self.idp_keys)
+ keys_to_check.append('remote_ids')
+ body = self.default_body.copy()
+ body['description'] = uuid.uuid4().hex
+ body['remote_ids'] = []
+ resp = self._create_default_idp(body=body)
+ self.assertValidResponse(resp, 'identity_provider', dummy_validator,
+ keys_to_check=keys_to_check,
+ ref=body)
+
+ def test_create_idp_remote_none(self):
+ """Creates an IdP with a None remote_ids."""
+
+ keys_to_check = list(self.idp_keys)
+ keys_to_check.append('remote_ids')
+ body = self.default_body.copy()
+ body['description'] = uuid.uuid4().hex
+ body['remote_ids'] = None
+ resp = self._create_default_idp(body=body)
+ expected = body.copy()
+ expected['remote_ids'] = []
+ self.assertValidResponse(resp, 'identity_provider', dummy_validator,
+ keys_to_check=keys_to_check,
+ ref=expected)
+
+ def test_update_idp_remote_ids(self):
+ """Update IdP's remote_ids parameter."""
+ body = self.default_body.copy()
+ body['remote_ids'] = [uuid.uuid4().hex]
+ default_resp = self._create_default_idp(body=body)
+ default_idp = self._fetch_attribute_from_response(default_resp,
+ 'identity_provider')
+ idp_id = default_idp.get('id')
+ url = self.base_url(suffix=idp_id)
+ self.assertIsNotNone(idp_id)
+
+ body['remote_ids'] = [uuid.uuid4().hex, uuid.uuid4().hex]
+
+ body = {'identity_provider': body}
+ resp = self.patch(url, body=body)
+ updated_idp = self._fetch_attribute_from_response(resp,
+ 'identity_provider')
+ body = body['identity_provider']
+ self.assertEqual(sorted(body['remote_ids']),
+ sorted(updated_idp.get('remote_ids')))
+
+ resp = self.get(url)
+ returned_idp = self._fetch_attribute_from_response(resp,
+ 'identity_provider')
+ self.assertEqual(sorted(body['remote_ids']),
+ sorted(returned_idp.get('remote_ids')))
+
+ def test_update_idp_clean_remote_ids(self):
+ """Update IdP's remote_ids parameter with an empty list."""
+ body = self.default_body.copy()
+ body['remote_ids'] = [uuid.uuid4().hex]
+ default_resp = self._create_default_idp(body=body)
+ default_idp = self._fetch_attribute_from_response(default_resp,
+ 'identity_provider')
+ idp_id = default_idp.get('id')
+ url = self.base_url(suffix=idp_id)
+ self.assertIsNotNone(idp_id)
+
+ body['remote_ids'] = []
+
+ body = {'identity_provider': body}
+ resp = self.patch(url, body=body)
+ updated_idp = self._fetch_attribute_from_response(resp,
+ 'identity_provider')
+ body = body['identity_provider']
+ self.assertEqual(sorted(body['remote_ids']),
+ sorted(updated_idp.get('remote_ids')))
+
+ resp = self.get(url)
+ returned_idp = self._fetch_attribute_from_response(resp,
+ 'identity_provider')
+ self.assertEqual(sorted(body['remote_ids']),
+ sorted(returned_idp.get('remote_ids')))
+
def test_list_idps(self, iterations=5):
"""Lists all available IdentityProviders.
@@ -899,6 +1049,33 @@ class FederatedIdentityProviderTests(FederationTests):
self.delete(url)
self.get(url, expected_status=404)
+ def test_delete_idp_also_deletes_assigned_protocols(self):
+ """Deleting an IdP will delete its assigned protocol."""
+
+ # create default IdP
+ default_resp = self._create_default_idp()
+ default_idp = self._fetch_attribute_from_response(default_resp,
+ 'identity_provider')
+ idp_id = default_idp['id']
+ protocol_id = uuid.uuid4().hex
+
+ url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
+ idp_url = self.base_url(suffix=idp_id)
+
+ # assign protocol to IdP
+ kwargs = {'expected_status': 201}
+ resp, idp_id, proto = self._assign_protocol_to_idp(
+ url=url,
+ idp_id=idp_id,
+ proto=protocol_id,
+ **kwargs)
+
+ # removing IdP will remove the assigned protocol as well
+ self.assertEqual(1, len(self.federation_api.list_protocols(idp_id)))
+ self.delete(idp_url)
+ self.get(idp_url, expected_status=404)
+ self.assertEqual(0, len(self.federation_api.list_protocols(idp_id)))
+
def test_delete_nonexisting_idp(self):
"""Delete nonexisting IdP.
@@ -918,7 +1095,7 @@ class FederatedIdentityProviderTests(FederationTests):
self.assertIsNotNone(idp_id)
_enabled = not default_idp.get('enabled')
- body = {'remote_id': uuid.uuid4().hex,
+ body = {'remote_ids': [uuid.uuid4().hex, uuid.uuid4().hex],
'description': uuid.uuid4().hex,
'enabled': _enabled}
@@ -928,13 +1105,21 @@ class FederatedIdentityProviderTests(FederationTests):
'identity_provider')
body = body['identity_provider']
for key in body.keys():
- self.assertEqual(body[key], updated_idp.get(key))
+ if isinstance(body[key], list):
+ self.assertEqual(sorted(body[key]),
+ sorted(updated_idp.get(key)))
+ else:
+ self.assertEqual(body[key], updated_idp.get(key))
resp = self.get(url)
updated_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
for key in body.keys():
- self.assertEqual(body[key], updated_idp.get(key))
+ if isinstance(body[key], list):
+ self.assertEqual(sorted(body[key]),
+ sorted(updated_idp.get(key)))
+ else:
+ self.assertEqual(body[key], updated_idp.get(key))
def test_update_idp_immutable_attributes(self):
"""Update IdP's immutable parameters.
@@ -1126,7 +1311,7 @@ class MappingCRUDTests(FederationTests):
self.assertIsNotNone(entity.get('id'))
self.assertIsNotNone(entity.get('rules'))
if ref:
- self.assertEqual(jsonutils.loads(entity['rules']), ref['rules'])
+ self.assertEqual(entity['rules'], ref['rules'])
return entity
def _create_default_mapping_entry(self):
@@ -1262,594 +1447,11 @@ class MappingCRUDTests(FederationTests):
self.put(url, expected_status=400, body={'mapping': mapping})
-class MappingRuleEngineTests(FederationTests):
- """A class for testing the mapping rule engine."""
-
- def assertValidMappedUserObject(self, mapped_properties,
- user_type='ephemeral',
- domain_id=None):
- """Check whether mapped properties object has 'user' within.
-
- According to today's rules, RuleProcessor does not have to issue user's
- id or name. What's actually required is user's type and for ephemeral
- users that would be service domain named 'Federated'.
- """
- self.assertIn('user', mapped_properties,
- message='Missing user object in mapped properties')
- user = mapped_properties['user']
- self.assertIn('type', user)
- self.assertEqual(user_type, user['type'])
- self.assertIn('domain', user)
- domain = user['domain']
- domain_name_or_id = domain.get('id') or domain.get('name')
- domain_ref = domain_id or federation.FEDERATED_DOMAIN_KEYWORD
- self.assertEqual(domain_ref, domain_name_or_id)
-
- def test_rule_engine_any_one_of_and_direct_mapping(self):
- """Should return user's name and group id EMPLOYEE_GROUP_ID.
-
- The ADMIN_ASSERTION should successfully have a match in MAPPING_LARGE.
- They will test the case where `any_one_of` is valid, and there is
- a direct mapping for the users name.
-
- """
-
- mapping = mapping_fixtures.MAPPING_LARGE
- assertion = mapping_fixtures.ADMIN_ASSERTION
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- values = rp.process(assertion)
-
- fn = assertion.get('FirstName')
- ln = assertion.get('LastName')
- full_name = '%s %s' % (fn, ln)
- group_ids = values.get('group_ids')
- user_name = values.get('user', {}).get('name')
-
- self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)
- self.assertEqual(full_name, user_name)
-
- def test_rule_engine_no_regex_match(self):
- """Should deny authorization, the email of the tester won't match.
-
- This will not match since the email in the assertion will fail
- the regex test. It is set to match any @example.com address.
- But the incoming value is set to eviltester@example.org.
- RuleProcessor should return list of empty group_ids.
-
- """
-
- mapping = mapping_fixtures.MAPPING_LARGE
- assertion = mapping_fixtures.BAD_TESTER_ASSERTION
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- mapped_properties = rp.process(assertion)
-
- self.assertValidMappedUserObject(mapped_properties)
- self.assertIsNone(mapped_properties['user'].get('name'))
- self.assertListEqual(list(), mapped_properties['group_ids'])
-
- def test_rule_engine_regex_many_groups(self):
- """Should return group CONTRACTOR_GROUP_ID.
-
- The TESTER_ASSERTION should successfully have a match in
- MAPPING_TESTER_REGEX. This will test the case where many groups
- are in the assertion, and a regex value is used to try and find
- a match.
-
- """
-
- mapping = mapping_fixtures.MAPPING_TESTER_REGEX
- assertion = mapping_fixtures.TESTER_ASSERTION
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- values = rp.process(assertion)
-
- self.assertValidMappedUserObject(values)
- user_name = assertion.get('UserName')
- group_ids = values.get('group_ids')
- name = values.get('user', {}).get('name')
-
- self.assertEqual(user_name, name)
- self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids)
-
- def test_rule_engine_any_one_of_many_rules(self):
- """Should return group CONTRACTOR_GROUP_ID.
-
- The CONTRACTOR_ASSERTION should successfully have a match in
- MAPPING_SMALL. This will test the case where many rules
- must be matched, including an `any_one_of`, and a direct
- mapping.
-
- """
-
- mapping = mapping_fixtures.MAPPING_SMALL
- assertion = mapping_fixtures.CONTRACTOR_ASSERTION
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- values = rp.process(assertion)
-
- self.assertValidMappedUserObject(values)
- user_name = assertion.get('UserName')
- group_ids = values.get('group_ids')
- name = values.get('user', {}).get('name')
-
- self.assertEqual(user_name, name)
- self.assertIn(mapping_fixtures.CONTRACTOR_GROUP_ID, group_ids)
-
- def test_rule_engine_not_any_of_and_direct_mapping(self):
- """Should return user's name and email.
-
- The CUSTOMER_ASSERTION should successfully have a match in
- MAPPING_LARGE. This will test the case where a requirement
- has `not_any_of`, and direct mapping to a username, no group.
-
- """
-
- mapping = mapping_fixtures.MAPPING_LARGE
- assertion = mapping_fixtures.CUSTOMER_ASSERTION
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- values = rp.process(assertion)
-
- self.assertValidMappedUserObject(values)
- user_name = assertion.get('UserName')
- group_ids = values.get('group_ids')
- name = values.get('user', {}).get('name')
-
- self.assertEqual(user_name, name)
- self.assertEqual([], group_ids,)
-
- def test_rule_engine_not_any_of_many_rules(self):
- """Should return group EMPLOYEE_GROUP_ID.
-
- The EMPLOYEE_ASSERTION should successfully have a match in
- MAPPING_SMALL. This will test the case where many remote
- rules must be matched, including a `not_any_of`.
-
- """
-
- mapping = mapping_fixtures.MAPPING_SMALL
- assertion = mapping_fixtures.EMPLOYEE_ASSERTION
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- values = rp.process(assertion)
-
- self.assertValidMappedUserObject(values)
- user_name = assertion.get('UserName')
- group_ids = values.get('group_ids')
- name = values.get('user', {}).get('name')
-
- self.assertEqual(user_name, name)
- self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)
-
- def test_rule_engine_not_any_of_regex_verify_pass(self):
- """Should return group DEVELOPER_GROUP_ID.
-
- The DEVELOPER_ASSERTION should successfully have a match in
- MAPPING_DEVELOPER_REGEX. This will test the case where many
- remote rules must be matched, including a `not_any_of`, with
- regex set to True.
-
- """
-
- mapping = mapping_fixtures.MAPPING_DEVELOPER_REGEX
- assertion = mapping_fixtures.DEVELOPER_ASSERTION
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- values = rp.process(assertion)
-
- self.assertValidMappedUserObject(values)
- user_name = assertion.get('UserName')
- group_ids = values.get('group_ids')
- name = values.get('user', {}).get('name')
-
- self.assertEqual(user_name, name)
- self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids)
-
- def test_rule_engine_not_any_of_regex_verify_fail(self):
- """Should deny authorization.
-
- The email in the assertion will fail the regex test.
- It is set to reject any @example.org address, but the
- incoming value is set to evildeveloper@example.org.
- RuleProcessor should return list of empty group_ids.
-
- """
-
- mapping = mapping_fixtures.MAPPING_DEVELOPER_REGEX
- assertion = mapping_fixtures.BAD_DEVELOPER_ASSERTION
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- mapped_properties = rp.process(assertion)
-
- self.assertValidMappedUserObject(mapped_properties)
- self.assertIsNone(mapped_properties['user'].get('name'))
- self.assertListEqual(list(), mapped_properties['group_ids'])
-
- def _rule_engine_regex_match_and_many_groups(self, assertion):
- """Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.
-
- A helper function injecting assertion passed as an argument.
- Expect DEVELOPER_GROUP_ID and TESTER_GROUP_ID in the results.
-
- """
-
- mapping = mapping_fixtures.MAPPING_LARGE
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- values = rp.process(assertion)
-
- user_name = assertion.get('UserName')
- group_ids = values.get('group_ids')
- name = values.get('user', {}).get('name')
-
- self.assertValidMappedUserObject(values)
- self.assertEqual(user_name, name)
- self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids)
- self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids)
-
- def test_rule_engine_regex_match_and_many_groups(self):
- """Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.
-
- The TESTER_ASSERTION should successfully have a match in
- MAPPING_LARGE. This will test a successful regex match
- for an `any_one_of` evaluation type, and will have many
- groups returned.
-
- """
- self._rule_engine_regex_match_and_many_groups(
- mapping_fixtures.TESTER_ASSERTION)
-
- def test_rule_engine_discards_nonstring_objects(self):
- """Check whether RuleProcessor discards non string objects.
-
- Despite the fact that assertion is malformed and contains
- non string objects, RuleProcessor should correctly discard them and
- successfully have a match in MAPPING_LARGE.
-
- """
- self._rule_engine_regex_match_and_many_groups(
- mapping_fixtures.MALFORMED_TESTER_ASSERTION)
-
- def test_rule_engine_fails_after_discarding_nonstring(self):
- """Check whether RuleProcessor discards non string objects.
-
- Expect RuleProcessor to discard non string object, which
- is required for a correct rule match. RuleProcessor will result with
- empty list of groups.
-
- """
- mapping = mapping_fixtures.MAPPING_SMALL
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- assertion = mapping_fixtures.CONTRACTOR_MALFORMED_ASSERTION
- mapped_properties = rp.process(assertion)
- self.assertValidMappedUserObject(mapped_properties)
- self.assertIsNone(mapped_properties['user'].get('name'))
- self.assertListEqual(list(), mapped_properties['group_ids'])
-
- def test_rule_engine_returns_group_names(self):
- """Check whether RuleProcessor returns group names with their domains.
-
- RuleProcessor should return 'group_names' entry with a list of
- dictionaries with two entries 'name' and 'domain' identifying group by
- its name and domain.
-
- """
- mapping = mapping_fixtures.MAPPING_GROUP_NAMES
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- assertion = mapping_fixtures.EMPLOYEE_ASSERTION
- mapped_properties = rp.process(assertion)
- self.assertIsNotNone(mapped_properties)
- self.assertValidMappedUserObject(mapped_properties)
- reference = {
- mapping_fixtures.DEVELOPER_GROUP_NAME:
- {
- "name": mapping_fixtures.DEVELOPER_GROUP_NAME,
- "domain": {
- "name": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_NAME
- }
- },
- mapping_fixtures.TESTER_GROUP_NAME:
- {
- "name": mapping_fixtures.TESTER_GROUP_NAME,
- "domain": {
- "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
- }
- }
- }
- for rule in mapped_properties['group_names']:
- self.assertDictEqual(reference.get(rule.get('name')), rule)
-
- def test_rule_engine_whitelist_and_direct_groups_mapping(self):
- """Should return user's groups Developer and Contractor.
-
- The EMPLOYEE_ASSERTION_MULTIPLE_GROUPS should successfully have a match
- in MAPPING_GROUPS_WHITELIST. It will test the case where 'whitelist'
- correctly filters out Manager and only allows Developer and Contractor.
-
- """
-
- mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST
- assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- mapped_properties = rp.process(assertion)
- self.assertIsNotNone(mapped_properties)
-
- reference = {
- mapping_fixtures.DEVELOPER_GROUP_NAME:
- {
- "name": mapping_fixtures.DEVELOPER_GROUP_NAME,
- "domain": {
- "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
- }
- },
- mapping_fixtures.CONTRACTOR_GROUP_NAME:
- {
- "name": mapping_fixtures.CONTRACTOR_GROUP_NAME,
- "domain": {
- "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
- }
- }
- }
- for rule in mapped_properties['group_names']:
- self.assertDictEqual(reference.get(rule.get('name')), rule)
-
- self.assertEqual('tbo', mapped_properties['user']['name'])
- self.assertEqual([], mapped_properties['group_ids'])
-
- def test_rule_engine_blacklist_and_direct_groups_mapping(self):
- """Should return user's group Developer.
-
- The EMPLOYEE_ASSERTION_MULTIPLE_GROUPS should successfully have a match
- in MAPPING_GROUPS_BLACKLIST. It will test the case where 'blacklist'
- correctly filters out Manager and Developer and only allows Contractor.
-
- """
-
- mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST
- assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- mapped_properties = rp.process(assertion)
- self.assertIsNotNone(mapped_properties)
-
- reference = {
- mapping_fixtures.CONTRACTOR_GROUP_NAME:
- {
- "name": mapping_fixtures.CONTRACTOR_GROUP_NAME,
- "domain": {
- "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
- }
- }
- }
- for rule in mapped_properties['group_names']:
- self.assertDictEqual(reference.get(rule.get('name')), rule)
- self.assertEqual('tbo', mapped_properties['user']['name'])
- self.assertEqual([], mapped_properties['group_ids'])
-
- def test_rule_engine_blacklist_and_direct_groups_mapping_multiples(self):
- """Tests matching multiple values before the blacklist.
-
- Verifies that the local indexes are correct when matching multiple
- remote values for a field when the field occurs before the blacklist
- entry in the remote rules.
-
- """
-
- mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST_MULTIPLES
- assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- mapped_properties = rp.process(assertion)
- self.assertIsNotNone(mapped_properties)
-
- reference = {
- mapping_fixtures.CONTRACTOR_GROUP_NAME:
- {
- "name": mapping_fixtures.CONTRACTOR_GROUP_NAME,
- "domain": {
- "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
- }
- }
- }
- for rule in mapped_properties['group_names']:
- self.assertDictEqual(reference.get(rule.get('name')), rule)
- self.assertEqual('tbo', mapped_properties['user']['name'])
- self.assertEqual([], mapped_properties['group_ids'])
-
- def test_rule_engine_whitelist_direct_group_mapping_missing_domain(self):
- """Test if the local rule is rejected upon missing domain value
-
- This is a variation with a ``whitelist`` filter.
-
- """
- mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_MISSING_DOMAIN
- assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- self.assertRaises(exception.ValidationError, rp.process, assertion)
-
- def test_rule_engine_blacklist_direct_group_mapping_missing_domain(self):
- """Test if the local rule is rejected upon missing domain value
-
- This is a variation with a ``blacklist`` filter.
-
- """
- mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST_MISSING_DOMAIN
- assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- self.assertRaises(exception.ValidationError, rp.process, assertion)
-
- def test_rule_engine_no_groups_allowed(self):
- """Should return user mapped to no groups.
-
- The EMPLOYEE_ASSERTION should successfully have a match
- in MAPPING_GROUPS_WHITELIST, but 'whitelist' should filter out
- the group values from the assertion and thus map to no groups.
-
- """
- mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST
- assertion = mapping_fixtures.EMPLOYEE_ASSERTION
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- mapped_properties = rp.process(assertion)
- self.assertIsNotNone(mapped_properties)
- self.assertListEqual(mapped_properties['group_names'], [])
- self.assertListEqual(mapped_properties['group_ids'], [])
- self.assertEqual('tbo', mapped_properties['user']['name'])
-
- def test_mapping_federated_domain_specified(self):
- """Test mapping engine when domain 'ephemeral' is explicitely set.
-
- For that, we use mapping rule MAPPING_EPHEMERAL_USER and assertion
- EMPLOYEE_ASSERTION
-
- """
- mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- assertion = mapping_fixtures.EMPLOYEE_ASSERTION
- mapped_properties = rp.process(assertion)
- self.assertIsNotNone(mapped_properties)
- self.assertValidMappedUserObject(mapped_properties)
-
- def test_create_user_object_with_bad_mapping(self):
- """Test if user object is created even with bad mapping.
-
- User objects will be created by mapping engine always as long as there
- is corresponding local rule. This test shows, that even with assertion
- where no group names nor ids are matched, but there is 'blind' rule for
- mapping user, such object will be created.
-
- In this test MAPPING_EHPEMERAL_USER expects UserName set to jsmith
- whereas value from assertion is 'tbo'.
-
- """
- mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- assertion = mapping_fixtures.CONTRACTOR_ASSERTION
- mapped_properties = rp.process(assertion)
- self.assertIsNotNone(mapped_properties)
- self.assertValidMappedUserObject(mapped_properties)
-
- self.assertNotIn('id', mapped_properties['user'])
- self.assertNotIn('name', mapped_properties['user'])
-
- def test_set_ephemeral_domain_to_ephemeral_users(self):
- """Test auto assigning service domain to ephemeral users.
-
- Test that ephemeral users will always become members of federated
- service domain. The check depends on ``type`` value which must be set
- to ``ephemeral`` in case of ephemeral user.
-
- """
- mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER_LOCAL_DOMAIN
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- assertion = mapping_fixtures.CONTRACTOR_ASSERTION
- mapped_properties = rp.process(assertion)
- self.assertIsNotNone(mapped_properties)
- self.assertValidMappedUserObject(mapped_properties)
-
- def test_local_user_local_domain(self):
- """Test that local users can have non-service domains assigned."""
- mapping = mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- assertion = mapping_fixtures.CONTRACTOR_ASSERTION
- mapped_properties = rp.process(assertion)
- self.assertIsNotNone(mapped_properties)
- self.assertValidMappedUserObject(
- mapped_properties, user_type='local',
- domain_id=mapping_fixtures.LOCAL_DOMAIN)
-
- def test_user_identifications_name(self):
- """Test varius mapping options and how users are identified.
-
- This test calls mapped.setup_username() for propagating user object.
-
- Test plan:
- - Check if the user has proper domain ('federated') set
- - Check if the user has property type set ('ephemeral')
- - Check if user's name is properly mapped from the assertion
- - Check if user's id is properly set and equal to name, as it was not
- explicitely specified in the mapping.
-
- """
- mapping = mapping_fixtures.MAPPING_USER_IDS
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- assertion = mapping_fixtures.CONTRACTOR_ASSERTION
- mapped_properties = rp.process(assertion)
- self.assertIsNotNone(mapped_properties)
- self.assertValidMappedUserObject(mapped_properties)
- mapped.setup_username({}, mapped_properties)
- self.assertEqual('jsmith', mapped_properties['user']['id'])
- self.assertEqual('jsmith', mapped_properties['user']['name'])
-
- def test_user_identifications_name_and_federated_domain(self):
- """Test varius mapping options and how users are identified.
-
- This test calls mapped.setup_username() for propagating user object.
-
- Test plan:
- - Check if the user has proper domain ('federated') set
- - Check if the user has propert type set ('ephemeral')
- - Check if user's name is properly mapped from the assertion
- - Check if user's id is properly set and equal to name, as it was not
- explicitely specified in the mapping.
-
- """
- mapping = mapping_fixtures.MAPPING_USER_IDS
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- assertion = mapping_fixtures.EMPLOYEE_ASSERTION
- mapped_properties = rp.process(assertion)
- self.assertIsNotNone(mapped_properties)
- self.assertValidMappedUserObject(mapped_properties)
- mapped.setup_username({}, mapped_properties)
- self.assertEqual('tbo', mapped_properties['user']['name'])
- self.assertEqual('tbo', mapped_properties['user']['id'])
-
- def test_user_identification_id(self):
- """Test varius mapping options and how users are identified.
-
- This test calls mapped.setup_username() for propagating user object.
-
- Test plan:
- - Check if the user has proper domain ('federated') set
- - Check if the user has propert type set ('ephemeral')
- - Check if user's id is properly mapped from the assertion
- - Check if user's name is properly set and equal to id, as it was not
- explicitely specified in the mapping.
-
- """
- mapping = mapping_fixtures.MAPPING_USER_IDS
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- assertion = mapping_fixtures.ADMIN_ASSERTION
- mapped_properties = rp.process(assertion)
- context = {'environment': {}}
- self.assertIsNotNone(mapped_properties)
- self.assertValidMappedUserObject(mapped_properties)
- mapped.setup_username(context, mapped_properties)
- self.assertEqual('bob', mapped_properties['user']['name'])
- self.assertEqual('bob', mapped_properties['user']['id'])
-
- def test_user_identification_id_and_name(self):
- """Test varius mapping options and how users are identified.
-
- This test calls mapped.setup_username() for propagating user object.
-
- Test plan:
- - Check if the user has proper domain ('federated') set
- - Check if the user has proper type set ('ephemeral')
- - Check if user's name is properly mapped from the assertion
- - Check if user's id is properly set and and equal to value hardcoded
- in the mapping
-
- """
- mapping = mapping_fixtures.MAPPING_USER_IDS
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- assertion = mapping_fixtures.CUSTOMER_ASSERTION
- mapped_properties = rp.process(assertion)
- context = {'environment': {}}
- self.assertIsNotNone(mapped_properties)
- self.assertValidMappedUserObject(mapped_properties)
- mapped.setup_username(context, mapped_properties)
- self.assertEqual('bwilliams', mapped_properties['user']['name'])
- self.assertEqual('abc123', mapped_properties['user']['id'])
-
-
class FederatedTokenTests(FederationTests, FederatedSetupMixin):
def auth_plugin_config_override(self):
methods = ['saml2']
- method_classes = {'saml2': 'keystone.auth.plugins.saml2.Saml2'}
- super(FederatedTokenTests, self).auth_plugin_config_override(
- methods, **method_classes)
+ super(FederatedTokenTests, self).auth_plugin_config_override(methods)
def setUp(self):
super(FederatedTokenTests, self).setUp()
@@ -1923,7 +1525,8 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
def test_issue_unscoped_token_with_remote_no_attribute(self):
r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
environment={
- self.REMOTE_ID_ATTR: self.REMOTE_ID
+ self.REMOTE_ID_ATTR:
+ self.REMOTE_IDS[0]
})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
@@ -1932,7 +1535,18 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
remote_id_attribute=self.REMOTE_ID_ATTR)
r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
environment={
- self.REMOTE_ID_ATTR: self.REMOTE_ID
+ self.REMOTE_ID_ATTR:
+ self.REMOTE_IDS[0]
+ })
+ self.assertIsNotNone(r.headers.get('X-Subject-Token'))
+
+ def test_issue_unscoped_token_with_saml2_remote(self):
+ self.config_fixture.config(group='saml2',
+ remote_id_attribute=self.REMOTE_ID_ATTR)
+ r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
+ environment={
+ self.REMOTE_ID_ATTR:
+ self.REMOTE_IDS[0]
})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
@@ -1946,6 +1560,25 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
self.REMOTE_ID_ATTR: uuid.uuid4().hex
})
+ def test_issue_unscoped_token_with_remote_default_overwritten(self):
+ """Test that protocol remote_id_attribute has higher priority.
+
+ Make sure the parameter stored under ``protocol`` section has higher
+ priority over parameter from default ``federation`` configuration
+ section.
+
+ """
+ self.config_fixture.config(group='saml2',
+ remote_id_attribute=self.REMOTE_ID_ATTR)
+ self.config_fixture.config(group='federation',
+ remote_id_attribute=uuid.uuid4().hex)
+ r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
+ environment={
+ self.REMOTE_ID_ATTR:
+ self.REMOTE_IDS[0]
+ })
+ self.assertIsNotNone(r.headers.get('X-Subject-Token'))
+
def test_issue_unscoped_token_with_remote_unavailable(self):
self.config_fixture.config(group='federation',
remote_id_attribute=self.REMOTE_ID_ATTR)
@@ -1979,7 +1612,7 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
context = {
'environment': {
'malformed_object': object(),
- 'another_bad_idea': tuple(xrange(10)),
+ 'another_bad_idea': tuple(range(10)),
'yet_another_bad_param': dict(zip(uuid.uuid4().hex,
range(32)))
}
@@ -2156,6 +1789,44 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
self.assertEqual(projects_ref, projects,
'match failed for url %s' % url)
+ # TODO(samueldmq): Create another test class for role inheritance tests.
+ # The advantage would be to reduce the complexity of this test class and
+ # have tests specific to this fuctionality grouped, easing readability and
+ # maintenability.
+ def test_list_projects_for_inherited_project_assignment(self):
+ # Enable os_inherit extension
+ self.config_fixture.config(group='os_inherit', enabled=True)
+
+ # Create a subproject
+ subproject_inherited = self.new_project_ref(
+ domain_id=self.domainD['id'],
+ parent_id=self.project_inherited['id'])
+ self.resource_api.create_project(subproject_inherited['id'],
+ subproject_inherited)
+
+ # Create an inherited role assignment
+ self.assignment_api.create_grant(
+ role_id=self.role_employee['id'],
+ group_id=self.group_employees['id'],
+ project_id=self.project_inherited['id'],
+ inherited_to_projects=True)
+
+ # Define expected projects from employee assertion, which contain
+ # the created subproject
+ expected_project_ids = [self.project_all['id'],
+ self.proj_employees['id'],
+ subproject_inherited['id']]
+
+ # Assert expected projects for both available URLs
+ for url in ('/OS-FEDERATION/projects', '/auth/projects'):
+ r = self.get(url, token=self.tokens['EMPLOYEE_ASSERTION'])
+ project_ids = [project['id'] for project in r.result['projects']]
+
+ self.assertEqual(len(expected_project_ids), len(project_ids))
+ for expected_project_id in expected_project_ids:
+ self.assertIn(expected_project_id, project_ids,
+ 'Projects match failed for url %s' % url)
+
def test_list_domains(self):
urls = ('/OS-FEDERATION/domains', '/auth/domains')
@@ -2325,7 +1996,6 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
"remote": [
{
"type": "REMOTE_USER_GROUPS",
- "blacklist": ["noblacklist"]
}
]
}
@@ -2333,10 +2003,290 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
}
self.federation_api.update_mapping(self.mapping['id'], rules)
+ def test_empty_blacklist_passess_all_values(self):
+ """Test a mapping with empty blacklist specified
+
+ Not adding a ``blacklist`` keyword to the mapping rules has the same
+ effect as adding an empty ``blacklist``.
+ In both cases, the mapping engine will not discard any groups that are
+ associated with apache environment variables.
+
+ This test checks scenario where an empty blacklist was specified.
+ Expected result is to allow any value.
+
+ The test scenario is as follows:
+ - Create group ``EXISTS``
+ - Create group ``NO_EXISTS``
+ - Set mapping rules for existing IdP with a blacklist
+ that passes through as REMOTE_USER_GROUPS
+ - Issue unscoped token with groups ``EXISTS`` and ``NO_EXISTS``
+ assigned
+
+ """
+
+ domain_id = self.domainA['id']
+ domain_name = self.domainA['name']
+
+ # Add a group "EXISTS"
+ group_exists = self.new_group_ref(domain_id=domain_id)
+ group_exists['name'] = 'EXISTS'
+ group_exists = self.identity_api.create_group(group_exists)
+
+ # Add a group "NO_EXISTS"
+ group_no_exists = self.new_group_ref(domain_id=domain_id)
+ group_no_exists['name'] = 'NO_EXISTS'
+ group_no_exists = self.identity_api.create_group(group_no_exists)
+
+ group_ids = set([group_exists['id'], group_no_exists['id']])
+
+ rules = {
+ 'rules': [
+ {
+ "local": [
+ {
+ "user": {
+ "name": "{0}",
+ "id": "{0}"
+ }
+ }
+ ],
+ "remote": [
+ {
+ "type": "REMOTE_USER"
+ }
+ ]
+ },
+ {
+ "local": [
+ {
+ "groups": "{0}",
+ "domain": {"name": domain_name}
+ }
+ ],
+ "remote": [
+ {
+ "type": "REMOTE_USER_GROUPS",
+ "blacklist": []
+ }
+ ]
+ }
+ ]
+ }
+ self.federation_api.update_mapping(self.mapping['id'], rules)
+ r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION')
+ assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups']
+ self.assertEqual(len(group_ids), len(assigned_group_ids))
+ for group in assigned_group_ids:
+ self.assertIn(group['id'], group_ids)
+
+ def test_not_adding_blacklist_passess_all_values(self):
+ """Test a mapping without blacklist specified.
+
+ Not adding a ``blacklist`` keyword to the mapping rules has the same
+ effect as adding an empty ``blacklist``. In both cases all values will
+ be accepted and passed.
+
+ This test checks scenario where an blacklist was not specified.
+ Expected result is to allow any value.
+
+ The test scenario is as follows:
+ - Create group ``EXISTS``
+ - Create group ``NO_EXISTS``
+ - Set mapping rules for existing IdP with a blacklist
+ that passes through as REMOTE_USER_GROUPS
+ - Issue unscoped token with on groups ``EXISTS`` and ``NO_EXISTS``
+ assigned
+
+ """
+
+ domain_id = self.domainA['id']
+ domain_name = self.domainA['name']
+
+ # Add a group "EXISTS"
+ group_exists = self.new_group_ref(domain_id=domain_id)
+ group_exists['name'] = 'EXISTS'
+ group_exists = self.identity_api.create_group(group_exists)
+
+ # Add a group "NO_EXISTS"
+ group_no_exists = self.new_group_ref(domain_id=domain_id)
+ group_no_exists['name'] = 'NO_EXISTS'
+ group_no_exists = self.identity_api.create_group(group_no_exists)
+
+ group_ids = set([group_exists['id'], group_no_exists['id']])
+
+ rules = {
+ 'rules': [
+ {
+ "local": [
+ {
+ "user": {
+ "name": "{0}",
+ "id": "{0}"
+ }
+ }
+ ],
+ "remote": [
+ {
+ "type": "REMOTE_USER"
+ }
+ ]
+ },
+ {
+ "local": [
+ {
+ "groups": "{0}",
+ "domain": {"name": domain_name}
+ }
+ ],
+ "remote": [
+ {
+ "type": "REMOTE_USER_GROUPS",
+ }
+ ]
+ }
+ ]
+ }
+ self.federation_api.update_mapping(self.mapping['id'], rules)
+ r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION')
+ assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups']
+ self.assertEqual(len(group_ids), len(assigned_group_ids))
+ for group in assigned_group_ids:
+ self.assertIn(group['id'], group_ids)
+
+ def test_empty_whitelist_discards_all_values(self):
+ """Test that empty whitelist blocks all the values
+
+ Not adding a ``whitelist`` keyword to the mapping value is different
+ than adding empty whitelist. The former case will simply pass all the
+ values, whereas the latter would discard all the values.
+
+ This test checks scenario where an empty whitelist was specified.
+ The expected result is that no groups are matched.
+
+ The test scenario is as follows:
+ - Create group ``EXISTS``
+ - Set mapping rules for existing IdP with an empty whitelist
+ that whould discard any values from the assertion
+ - Try issuing unscoped token, expect server to raise
+ ``exception.MissingGroups`` as no groups were matched and ephemeral
+ user does not have any group assigned.
+
+ """
+ domain_id = self.domainA['id']
+ domain_name = self.domainA['name']
+ group = self.new_group_ref(domain_id=domain_id)
+ group['name'] = 'EXISTS'
+ group = self.identity_api.create_group(group)
+ rules = {
+ 'rules': [
+ {
+ "local": [
+ {
+ "user": {
+ "name": "{0}",
+ "id": "{0}"
+ }
+ }
+ ],
+ "remote": [
+ {
+ "type": "REMOTE_USER"
+ }
+ ]
+ },
+ {
+ "local": [
+ {
+ "groups": "{0}",
+ "domain": {"name": domain_name}
+ }
+ ],
+ "remote": [
+ {
+ "type": "REMOTE_USER_GROUPS",
+ "whitelist": []
+ }
+ ]
+ }
+ ]
+ }
+ self.federation_api.update_mapping(self.mapping['id'], rules)
+
+ self.assertRaises(exception.MissingGroups,
+ self._issue_unscoped_token,
+ assertion='UNMATCHED_GROUP_ASSERTION')
+
+ def test_not_setting_whitelist_accepts_all_values(self):
+ """Test that not setting whitelist passes
+
+ Not adding a ``whitelist`` keyword to the mapping value is different
+ than adding empty whitelist. The former case will simply pass all the
+ values, whereas the latter would discard all the values.
+
+ This test checks a scenario where a ``whitelist`` was not specified.
+ Expected result is that no groups are ignored.
+
+ The test scenario is as follows:
+ - Create group ``EXISTS``
+ - Set mapping rules for existing IdP with an empty whitelist
+ that whould discard any values from the assertion
+ - Issue an unscoped token and make sure ephemeral user is a member of
+ two groups.
+
+ """
+ domain_id = self.domainA['id']
+ domain_name = self.domainA['name']
+
+ # Add a group "EXISTS"
+ group_exists = self.new_group_ref(domain_id=domain_id)
+ group_exists['name'] = 'EXISTS'
+ group_exists = self.identity_api.create_group(group_exists)
+
+ # Add a group "NO_EXISTS"
+ group_no_exists = self.new_group_ref(domain_id=domain_id)
+ group_no_exists['name'] = 'NO_EXISTS'
+ group_no_exists = self.identity_api.create_group(group_no_exists)
+
+ group_ids = set([group_exists['id'], group_no_exists['id']])
+
+ rules = {
+ 'rules': [
+ {
+ "local": [
+ {
+ "user": {
+ "name": "{0}",
+ "id": "{0}"
+ }
+ }
+ ],
+ "remote": [
+ {
+ "type": "REMOTE_USER"
+ }
+ ]
+ },
+ {
+ "local": [
+ {
+ "groups": "{0}",
+ "domain": {"name": domain_name}
+ }
+ ],
+ "remote": [
+ {
+ "type": "REMOTE_USER_GROUPS",
+ }
+ ]
+ }
+ ]
+ }
+ self.federation_api.update_mapping(self.mapping['id'], rules)
r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION')
assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups']
- self.assertEqual(1, len(assigned_group_ids))
- self.assertEqual(group['id'], assigned_group_ids[0]['id'])
+ self.assertEqual(len(group_ids), len(assigned_group_ids))
+ for group in assigned_group_ids:
+ self.assertIn(group['id'], group_ids)
def test_assertion_prefix_parameter(self):
"""Test parameters filtering based on the prefix.
@@ -2416,27 +2366,24 @@ class FernetFederatedTokenTests(FederationTests, FederatedSetupMixin):
super(FernetFederatedTokenTests, self).load_fixtures(fixtures)
self.load_federation_sample_data()
+ def config_overrides(self):
+ super(FernetFederatedTokenTests, self).config_overrides()
+ self.config_fixture.config(group='token', provider='fernet')
+ self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
+
def auth_plugin_config_override(self):
methods = ['saml2', 'token', 'password']
- method_classes = dict(
- password='keystone.auth.plugins.password.Password',
- token='keystone.auth.plugins.token.Token',
- saml2='keystone.auth.plugins.saml2.Saml2')
super(FernetFederatedTokenTests,
- self).auth_plugin_config_override(methods, **method_classes)
- self.config_fixture.config(
- group='token',
- provider='keystone.token.providers.fernet.Provider')
- self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
+ self).auth_plugin_config_override(methods)
def test_federated_unscoped_token(self):
resp = self._issue_unscoped_token()
- self.assertEqual(186, len(resp.headers['X-Subject-Token']))
+ self.assertEqual(204, len(resp.headers['X-Subject-Token']))
def test_federated_unscoped_token_with_multiple_groups(self):
assertion = 'ANOTHER_CUSTOMER_ASSERTION'
resp = self._issue_unscoped_token(assertion=assertion)
- self.assertEqual(204, len(resp.headers['X-Subject-Token']))
+ self.assertEqual(232, len(resp.headers['X-Subject-Token']))
def test_validate_federated_unscoped_token(self):
resp = self._issue_unscoped_token()
@@ -2481,11 +2428,8 @@ class FederatedTokenTestsMethodToken(FederatedTokenTests):
def auth_plugin_config_override(self):
methods = ['saml2', 'token']
- method_classes = dict(
- token='keystone.auth.plugins.token.Token',
- saml2='keystone.auth.plugins.saml2.Saml2')
super(FederatedTokenTests,
- self).auth_plugin_config_override(methods, **method_classes)
+ self).auth_plugin_config_override(methods)
class JsonHomeTests(FederationTests, test_v3.JsonHomeTestMixin):
@@ -2520,12 +2464,20 @@ class SAMLGenerationTests(FederationTests):
SP_AUTH_URL = ('http://beta.com:5000/v3/OS-FEDERATION/identity_providers'
'/BETA/protocols/saml2/auth')
+
+ ASSERTION_FILE = 'signed_saml2_assertion.xml'
+
+    # The values of the following variables match the attribute values found
+    # in ASSERTION_FILE
ISSUER = 'https://acme.com/FIM/sps/openstack/saml20'
RECIPIENT = 'http://beta.com/Shibboleth.sso/SAML2/POST'
SUBJECT = 'test_user'
+ SUBJECT_DOMAIN = 'user_domain'
ROLES = ['admin', 'member']
PROJECT = 'development'
+ PROJECT_DOMAIN = 'project_domain'
SAML_GENERATION_ROUTE = '/auth/OS-FEDERATION/saml2'
+ ECP_GENERATION_ROUTE = '/auth/OS-FEDERATION/saml2/ecp'
ASSERTION_VERSION = "2.0"
SERVICE_PROVDIER_ID = 'ACME'
@@ -2535,6 +2487,7 @@ class SAMLGenerationTests(FederationTests):
'enabled': True,
'description': uuid.uuid4().hex,
'sp_url': self.RECIPIENT,
+ 'relay_state_prefix': CONF.saml.relay_state_prefix,
}
return ref
@@ -2542,9 +2495,11 @@ class SAMLGenerationTests(FederationTests):
def setUp(self):
super(SAMLGenerationTests, self).setUp()
self.signed_assertion = saml2.create_class_from_xml_string(
- saml.Assertion, _load_xml('signed_saml2_assertion.xml'))
+ saml.Assertion, _load_xml(self.ASSERTION_FILE))
self.sp = self.sp_ref()
- self.federation_api.create_sp(self.SERVICE_PROVDIER_ID, self.sp)
+ url = '/OS-FEDERATION/service_providers/' + self.SERVICE_PROVDIER_ID
+ self.put(url, body={'service_provider': self.sp},
+ expected_status=201)
def test_samlize_token_values(self):
"""Test the SAML generator produces a SAML object.
@@ -2558,8 +2513,10 @@ class SAMLGenerationTests(FederationTests):
return_value=self.signed_assertion):
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
- self.SUBJECT, self.ROLES,
- self.PROJECT)
+ self.SUBJECT,
+ self.SUBJECT_DOMAIN,
+ self.ROLES, self.PROJECT,
+ self.PROJECT_DOMAIN)
assertion = response.assertion
self.assertIsNotNone(assertion)
@@ -2571,14 +2528,24 @@ class SAMLGenerationTests(FederationTests):
user_attribute = assertion.attribute_statement[0].attribute[0]
self.assertEqual(self.SUBJECT, user_attribute.attribute_value[0].text)
- role_attribute = assertion.attribute_statement[0].attribute[1]
+ user_domain_attribute = (
+ assertion.attribute_statement[0].attribute[1])
+ self.assertEqual(self.SUBJECT_DOMAIN,
+ user_domain_attribute.attribute_value[0].text)
+
+ role_attribute = assertion.attribute_statement[0].attribute[2]
for attribute_value in role_attribute.attribute_value:
self.assertIn(attribute_value.text, self.ROLES)
- project_attribute = assertion.attribute_statement[0].attribute[2]
+ project_attribute = assertion.attribute_statement[0].attribute[3]
self.assertEqual(self.PROJECT,
project_attribute.attribute_value[0].text)
+ project_domain_attribute = (
+ assertion.attribute_statement[0].attribute[4])
+ self.assertEqual(self.PROJECT_DOMAIN,
+ project_domain_attribute.attribute_value[0].text)
+
def test_verify_assertion_object(self):
"""Test that the Assertion object is built properly.
@@ -2590,8 +2557,10 @@ class SAMLGenerationTests(FederationTests):
side_effect=lambda x: x):
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
- self.SUBJECT, self.ROLES,
- self.PROJECT)
+ self.SUBJECT,
+ self.SUBJECT_DOMAIN,
+ self.ROLES, self.PROJECT,
+ self.PROJECT_DOMAIN)
assertion = response.assertion
self.assertEqual(self.ASSERTION_VERSION, assertion.version)
@@ -2607,8 +2576,10 @@ class SAMLGenerationTests(FederationTests):
return_value=self.signed_assertion):
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
- self.SUBJECT, self.ROLES,
- self.PROJECT)
+ self.SUBJECT,
+ self.SUBJECT_DOMAIN,
+ self.ROLES, self.PROJECT,
+ self.PROJECT_DOMAIN)
saml_str = response.to_string()
response = etree.fromstring(saml_str)
@@ -2621,13 +2592,19 @@ class SAMLGenerationTests(FederationTests):
user_attribute = assertion[4][0]
self.assertEqual(self.SUBJECT, user_attribute[0].text)
- role_attribute = assertion[4][1]
+ user_domain_attribute = assertion[4][1]
+ self.assertEqual(self.SUBJECT_DOMAIN, user_domain_attribute[0].text)
+
+ role_attribute = assertion[4][2]
for attribute_value in role_attribute:
self.assertIn(attribute_value.text, self.ROLES)
- project_attribute = assertion[4][2]
+ project_attribute = assertion[4][3]
self.assertEqual(self.PROJECT, project_attribute[0].text)
+ project_domain_attribute = assertion[4][4]
+ self.assertEqual(self.PROJECT_DOMAIN, project_domain_attribute[0].text)
+
def test_assertion_using_explicit_namespace_prefixes(self):
def mocked_subprocess_check_output(*popenargs, **kwargs):
# the last option is the assertion file to be signed
@@ -2642,8 +2619,10 @@ class SAMLGenerationTests(FederationTests):
side_effect=mocked_subprocess_check_output):
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
- self.SUBJECT, self.ROLES,
- self.PROJECT)
+ self.SUBJECT,
+ self.SUBJECT_DOMAIN,
+ self.ROLES, self.PROJECT,
+ self.PROJECT_DOMAIN)
assertion_xml = response.assertion.to_string()
# make sure we have the proper tag and prefix for the assertion
# namespace
@@ -2666,8 +2645,9 @@ class SAMLGenerationTests(FederationTests):
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
- self.SUBJECT, self.ROLES,
- self.PROJECT)
+ self.SUBJECT, self.SUBJECT_DOMAIN,
+ self.ROLES, self.PROJECT,
+ self.PROJECT_DOMAIN)
signature = response.assertion.signature
self.assertIsNotNone(signature)
@@ -2770,12 +2750,18 @@ class SAMLGenerationTests(FederationTests):
user_attribute = assertion[4][0]
self.assertIsInstance(user_attribute[0].text, str)
- role_attribute = assertion[4][1]
+ user_domain_attribute = assertion[4][1]
+ self.assertIsInstance(user_domain_attribute[0].text, str)
+
+ role_attribute = assertion[4][2]
self.assertIsInstance(role_attribute[0].text, str)
- project_attribute = assertion[4][2]
+ project_attribute = assertion[4][3]
self.assertIsInstance(project_attribute[0].text, str)
+ project_domain_attribute = assertion[4][4]
+ self.assertIsInstance(project_domain_attribute[0].text, str)
+
def test_invalid_scope_body(self):
"""Test that missing the scope in request body raises an exception.
@@ -2839,6 +2825,104 @@ class SAMLGenerationTests(FederationTests):
self.SERVICE_PROVDIER_ID)
self.post(self.SAML_GENERATION_ROUTE, body=body, expected_status=404)
+ def test_generate_ecp_route(self):
+ """Test that the ECP generation endpoint produces XML.
+
+ The ECP endpoint /v3/auth/OS-FEDERATION/saml2/ecp should take the same
+ input as the SAML generation endpoint (scoped token ID + Service
+ Provider ID).
+ The controller should return a SAML assertion that is wrapped in a
+ SOAP envelope.
+ """
+
+ self.config_fixture.config(group='saml', idp_entity_id=self.ISSUER)
+ token_id = self._fetch_valid_token()
+ body = self._create_generate_saml_request(token_id,
+ self.SERVICE_PROVDIER_ID)
+
+ with mock.patch.object(keystone_idp, '_sign_assertion',
+ return_value=self.signed_assertion):
+ http_response = self.post(self.ECP_GENERATION_ROUTE, body=body,
+ response_content_type='text/xml',
+ expected_status=200)
+
+ env_response = etree.fromstring(http_response.result)
+ header = env_response[0]
+
+ # Verify the relay state starts with 'ss:mem'
+ prefix = CONF.saml.relay_state_prefix
+ self.assertThat(header[0].text, matchers.StartsWith(prefix))
+
+ # Verify that the content in the body matches the expected assertion
+ body = env_response[1]
+ response = body[0]
+ issuer = response[0]
+ assertion = response[2]
+
+ self.assertEqual(self.RECIPIENT, response.get('Destination'))
+ self.assertEqual(self.ISSUER, issuer.text)
+
+ user_attribute = assertion[4][0]
+ self.assertIsInstance(user_attribute[0].text, str)
+
+ user_domain_attribute = assertion[4][1]
+ self.assertIsInstance(user_domain_attribute[0].text, str)
+
+ role_attribute = assertion[4][2]
+ self.assertIsInstance(role_attribute[0].text, str)
+
+ project_attribute = assertion[4][3]
+ self.assertIsInstance(project_attribute[0].text, str)
+
+ project_domain_attribute = assertion[4][4]
+ self.assertIsInstance(project_domain_attribute[0].text, str)
+
+ @mock.patch('saml2.create_class_from_xml_string')
+ @mock.patch('oslo_utils.fileutils.write_to_tempfile')
+ @mock.patch('subprocess.check_output')
+ def test__sign_assertion(self, check_output_mock,
+ write_to_tempfile_mock, create_class_mock):
+ write_to_tempfile_mock.return_value = 'tmp_path'
+ check_output_mock.return_value = 'fakeoutput'
+
+ keystone_idp._sign_assertion(self.signed_assertion)
+
+ create_class_mock.assert_called_with(saml.Assertion, 'fakeoutput')
+
+ @mock.patch('oslo_utils.fileutils.write_to_tempfile')
+ @mock.patch('subprocess.check_output')
+ def test__sign_assertion_exc(self, check_output_mock,
+ write_to_tempfile_mock):
+ # If the command fails the command output is logged.
+
+ write_to_tempfile_mock.return_value = 'tmp_path'
+
+ sample_returncode = 1
+ sample_output = self.getUniqueString()
+ check_output_mock.side_effect = subprocess.CalledProcessError(
+ returncode=sample_returncode, cmd=CONF.saml.xmlsec1_binary,
+ output=sample_output)
+
+ # FIXME(blk-u): This should raise exception.SAMLSigningError instead,
+ # but fails with TypeError due to concatenating string to Message, see
+ # bug 1484735.
+ self.assertRaises(TypeError,
+ keystone_idp._sign_assertion,
+ self.signed_assertion)
+
+ @mock.patch('oslo_utils.fileutils.write_to_tempfile')
+ def test__sign_assertion_fileutils_exc(self, write_to_tempfile_mock):
+ exception_msg = 'fake'
+ write_to_tempfile_mock.side_effect = Exception(exception_msg)
+
+ logger_fixture = self.useFixture(fixtures.LoggerFixture())
+ self.assertRaises(exception.SAMLSigningError,
+ keystone_idp._sign_assertion,
+ self.signed_assertion)
+ expected_log = (
+ 'Error when signing assertion, reason: %s\n' % exception_msg)
+ self.assertEqual(expected_log, logger_fixture.output)
+
class IdPMetadataGenerationTests(FederationTests):
"""A class for testing Identity Provider Metadata generation."""
@@ -2976,7 +3060,8 @@ class ServiceProviderTests(FederationTests):
MEMBER_NAME = 'service_provider'
COLLECTION_NAME = 'service_providers'
SERVICE_PROVIDER_ID = 'ACME'
- SP_KEYS = ['auth_url', 'id', 'enabled', 'description', 'sp_url']
+ SP_KEYS = ['auth_url', 'id', 'enabled', 'description',
+ 'relay_state_prefix', 'sp_url']
def setUp(self):
super(FederationTests, self).setUp()
@@ -2993,6 +3078,7 @@ class ServiceProviderTests(FederationTests):
'enabled': True,
'description': uuid.uuid4().hex,
'sp_url': 'https://' + uuid.uuid4().hex + '.com',
+ 'relay_state_prefix': CONF.saml.relay_state_prefix
}
return ref
@@ -3019,6 +3105,29 @@ class ServiceProviderTests(FederationTests):
self.assertValidEntity(resp.result['service_provider'],
keys_to_check=self.SP_KEYS)
+ def test_create_sp_relay_state_default(self):
+ """Create an SP without relay state, should default to `ss:mem`."""
+ url = self.base_url(suffix=uuid.uuid4().hex)
+ sp = self.sp_ref()
+ del sp['relay_state_prefix']
+ resp = self.put(url, body={'service_provider': sp},
+ expected_status=201)
+ sp_result = resp.result['service_provider']
+ self.assertEqual(CONF.saml.relay_state_prefix,
+ sp_result['relay_state_prefix'])
+
+ def test_create_sp_relay_state_non_default(self):
+ """Create an SP with custom relay state."""
+ url = self.base_url(suffix=uuid.uuid4().hex)
+ sp = self.sp_ref()
+ non_default_prefix = uuid.uuid4().hex
+ sp['relay_state_prefix'] = non_default_prefix
+ resp = self.put(url, body={'service_provider': sp},
+ expected_status=201)
+ sp_result = resp.result['service_provider']
+ self.assertEqual(non_default_prefix,
+ sp_result['relay_state_prefix'])
+
def test_create_service_provider_fail(self):
"""Try adding SP object with unallowed attribute."""
url = self.base_url(suffix=uuid.uuid4().hex)
@@ -3108,6 +3217,18 @@ class ServiceProviderTests(FederationTests):
self.patch(url, body={'service_provider': new_sp_ref},
expected_status=404)
+ def test_update_sp_relay_state(self):
+ """Update an SP with custome relay state."""
+ new_sp_ref = self.sp_ref()
+ non_default_prefix = uuid.uuid4().hex
+ new_sp_ref['relay_state_prefix'] = non_default_prefix
+ url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
+ resp = self.patch(url, body={'service_provider': new_sp_ref},
+ expected_status=200)
+ sp_result = resp.result['service_provider']
+ self.assertEqual(non_default_prefix,
+ sp_result['relay_state_prefix'])
+
def test_delete_service_provider(self):
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
self.delete(url, expected_status=204)
@@ -3125,6 +3246,7 @@ class WebSSOTests(FederatedTokenTests):
SSO_TEMPLATE_PATH = os.path.join(core.dirs.etc(), SSO_TEMPLATE_NAME)
TRUSTED_DASHBOARD = 'http://horizon.com'
ORIGIN = urllib.parse.quote_plus(TRUSTED_DASHBOARD)
+ PROTOCOL_REMOTE_ID_ATTR = uuid.uuid4().hex
def setUp(self):
super(WebSSOTests, self).setUp()
@@ -3145,7 +3267,19 @@ class WebSSOTests(FederatedTokenTests):
self.assertIn(self.TRUSTED_DASHBOARD, resp.body)
def test_federated_sso_auth(self):
- environment = {self.REMOTE_ID_ATTR: self.REMOTE_ID}
+ environment = {self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}
+ context = {'environment': environment}
+ query_string = {'origin': self.ORIGIN}
+ self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
+ resp = self.api.federated_sso_auth(context, self.PROTOCOL)
+ self.assertIn(self.TRUSTED_DASHBOARD, resp.body)
+
+ def test_federated_sso_auth_with_protocol_specific_remote_id(self):
+ self.config_fixture.config(
+ group=self.PROTOCOL,
+ remote_id_attribute=self.PROTOCOL_REMOTE_ID_ATTR)
+
+ environment = {self.PROTOCOL_REMOTE_ID_ATTR: self.REMOTE_IDS[0]}
context = {'environment': environment}
query_string = {'origin': self.ORIGIN}
self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
@@ -3162,7 +3296,7 @@ class WebSSOTests(FederatedTokenTests):
context, self.PROTOCOL)
def test_federated_sso_missing_query(self):
- environment = {self.REMOTE_ID_ATTR: self.REMOTE_ID}
+ environment = {self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}
context = {'environment': environment}
self._inject_assertion(context, 'EMPLOYEE_ASSERTION')
self.assertRaises(exception.ValidationError,
@@ -3178,7 +3312,7 @@ class WebSSOTests(FederatedTokenTests):
context, self.PROTOCOL)
def test_federated_sso_untrusted_dashboard(self):
- environment = {self.REMOTE_ID_ATTR: self.REMOTE_ID}
+ environment = {self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}
context = {'environment': environment}
query_string = {'origin': uuid.uuid4().hex}
self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
@@ -3229,6 +3363,7 @@ class K2KServiceCatalogTests(FederationTests):
def sp_response(self, id, ref):
ref.pop('enabled')
ref.pop('description')
+ ref.pop('relay_state_prefix')
ref['id'] = id
return ref
@@ -3238,6 +3373,7 @@ class K2KServiceCatalogTests(FederationTests):
'enabled': True,
'description': uuid.uuid4().hex,
'sp_url': uuid.uuid4().hex,
+ 'relay_state_prefix': CONF.saml.relay_state_prefix,
}
return ref
diff --git a/keystone-moon/keystone/tests/unit/test_v3_filters.py b/keystone-moon/keystone/tests/unit/test_v3_filters.py
index 4ad44657..668a2308 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_filters.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_filters.py
@@ -17,6 +17,7 @@ import uuid
from oslo_config import cfg
from oslo_serialization import jsonutils
+from six.moves import range
from keystone.tests.unit import filtering
from keystone.tests.unit.ksfixtures import temporaryfile
@@ -331,12 +332,6 @@ class IdentityTestListLimitCase(IdentityTestFilteredCase):
super(IdentityTestListLimitCase, self).setUp()
- self._set_policy({"identity:list_users": [],
- "identity:list_groups": [],
- "identity:list_projects": [],
- "identity:list_services": [],
- "identity:list_policies": []})
-
# Create 10 entries for each of the entities we are going to test
self.ENTITY_TYPES = ['user', 'group', 'project']
self.entity_lists = {}
@@ -398,6 +393,7 @@ class IdentityTestListLimitCase(IdentityTestFilteredCase):
else:
plural = '%ss' % entity
+ self._set_policy({"identity:list_%s" % plural: []})
self.config_fixture.config(list_limit=5)
self.config_fixture.config(group=driver, list_limit=None)
r = self.get('/%s' % plural, auth=self.auth)
@@ -435,6 +431,7 @@ class IdentityTestListLimitCase(IdentityTestFilteredCase):
def test_no_limit(self):
"""Check truncated attribute not set when list not limited."""
+ self._set_policy({"identity:list_services": []})
r = self.get('/services', auth=self.auth)
self.assertEqual(10, len(r.result.get('services')))
self.assertIsNone(r.result.get('truncated'))
@@ -445,6 +442,7 @@ class IdentityTestListLimitCase(IdentityTestFilteredCase):
# Test this by overriding the general limit with a higher
# driver-specific limit (allowing all entities to be returned
# in the collection), which should result in a non truncated list
+ self._set_policy({"identity:list_services": []})
self.config_fixture.config(list_limit=5)
self.config_fixture.config(group='catalog', list_limit=10)
r = self.get('/services', auth=self.auth)
diff --git a/keystone-moon/keystone/tests/unit/test_v3_identity.py b/keystone-moon/keystone/tests/unit/test_v3_identity.py
index ac077297..e0090829 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_identity.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_identity.py
@@ -12,8 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import logging
import uuid
+import fixtures
from oslo_config import cfg
from testtools import matchers
@@ -434,6 +436,38 @@ class IdentityTestCase(test_v3.RestfulTestCase):
self.delete('/groups/%(group_id)s' % {
'group_id': self.group_id})
+ def test_create_user_password_not_logged(self):
+ # When a user is created, the password isn't logged at any level.
+
+ log_fix = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
+
+ ref = self.new_user_ref(domain_id=self.domain_id)
+ self.post(
+ '/users',
+ body={'user': ref})
+
+ self.assertNotIn(ref['password'], log_fix.output)
+
+ def test_update_password_not_logged(self):
+ # When admin modifies user password, the password isn't logged at any
+ # level.
+
+ log_fix = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
+
+ # bootstrap a user as admin
+ user_ref = self.new_user_ref(domain_id=self.domain['id'])
+ password = user_ref['password']
+ user_ref = self.identity_api.create_user(user_ref)
+
+ # administrative password reset
+ new_password = uuid.uuid4().hex
+ self.patch('/users/%s' % user_ref['id'],
+ body={'user': {'password': new_password}},
+ expected_status=200)
+
+ self.assertNotIn(password, log_fix.output)
+ self.assertNotIn(new_password, log_fix.output)
+
class IdentityV3toV2MethodsTestCase(tests.TestCase):
"""Test users V3 to V2 conversion methods."""
@@ -444,27 +478,26 @@ class IdentityV3toV2MethodsTestCase(tests.TestCase):
self.user_id = uuid.uuid4().hex
self.default_project_id = uuid.uuid4().hex
self.tenant_id = uuid.uuid4().hex
- self.domain_id = uuid.uuid4().hex
# User with only default_project_id in ref
self.user1 = {'id': self.user_id,
'name': self.user_id,
'default_project_id': self.default_project_id,
- 'domain_id': self.domain_id}
+ 'domain_id': CONF.identity.default_domain_id}
# User without default_project_id or tenantId in ref
self.user2 = {'id': self.user_id,
'name': self.user_id,
- 'domain_id': self.domain_id}
+ 'domain_id': CONF.identity.default_domain_id}
# User with both tenantId and default_project_id in ref
self.user3 = {'id': self.user_id,
'name': self.user_id,
'default_project_id': self.default_project_id,
'tenantId': self.tenant_id,
- 'domain_id': self.domain_id}
+ 'domain_id': CONF.identity.default_domain_id}
# User with only tenantId in ref
self.user4 = {'id': self.user_id,
'name': self.user_id,
'tenantId': self.tenant_id,
- 'domain_id': self.domain_id}
+ 'domain_id': CONF.identity.default_domain_id}
# Expected result if the user is meant to have a tenantId element
self.expected_user = {'id': self.user_id,
@@ -582,3 +615,18 @@ class UserSelfServiceChangingPasswordsTestCase(test_v3.RestfulTestCase):
self.change_password(password=uuid.uuid4().hex,
original_password=self.user_ref['password'],
expected_status=401)
+
+ def test_changing_password_not_logged(self):
+ # When a user changes their password, the password isn't logged at any
+ # level.
+
+ log_fix = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
+
+ # change password
+ new_password = uuid.uuid4().hex
+ self.change_password(password=new_password,
+ original_password=self.user_ref['password'],
+ expected_status=204)
+
+ self.assertNotIn(self.user_ref['password'], log_fix.output)
+ self.assertNotIn(new_password, log_fix.output)
diff --git a/keystone-moon/keystone/tests/unit/test_v3_oauth1.py b/keystone-moon/keystone/tests/unit/test_v3_oauth1.py
index 608162d8..6c063c5e 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_oauth1.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_oauth1.py
@@ -241,16 +241,6 @@ class ConsumerCRUDTests(OAuth1Tests):
class OAuthFlowTests(OAuth1Tests):
- def auth_plugin_config_override(self):
- methods = ['password', 'token', 'oauth1']
- method_classes = {
- 'password': 'keystone.auth.plugins.password.Password',
- 'token': 'keystone.auth.plugins.token.Token',
- 'oauth1': 'keystone.auth.plugins.oauth1.OAuth',
- }
- super(OAuthFlowTests, self).auth_plugin_config_override(
- methods, **method_classes)
-
def test_oauth_flow(self):
consumer = self._create_single_consumer()
consumer_id = consumer['id']
diff --git a/keystone-moon/keystone/tests/unit/test_v3_os_revoke.py b/keystone-moon/keystone/tests/unit/test_v3_os_revoke.py
index 5710d973..48226cd4 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_os_revoke.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_os_revoke.py
@@ -17,6 +17,7 @@ from oslo_utils import timeutils
import six
from testtools import matchers
+from keystone.common import utils
from keystone.contrib.revoke import model
from keystone.tests.unit import test_v3
from keystone.token import provider
@@ -25,7 +26,7 @@ from keystone.token import provider
def _future_time_string():
expire_delta = datetime.timedelta(seconds=1000)
future_time = timeutils.utcnow() + expire_delta
- return timeutils.isotime(future_time)
+ return utils.isotime(future_time)
class OSRevokeTests(test_v3.RestfulTestCase, test_v3.JsonHomeTestMixin):
@@ -55,13 +56,13 @@ class OSRevokeTests(test_v3.RestfulTestCase, test_v3.JsonHomeTestMixin):
self.assertTrue(
before_time <= event_issued_before,
'invalid event issued_before time; %s is not later than %s.' % (
- timeutils.isotime(event_issued_before, subsecond=True),
- timeutils.isotime(before_time, subsecond=True)))
+ utils.isotime(event_issued_before, subsecond=True),
+ utils.isotime(before_time, subsecond=True)))
self.assertTrue(
event_issued_before <= after_time,
'invalid event issued_before time; %s is not earlier than %s.' % (
- timeutils.isotime(event_issued_before, subsecond=True),
- timeutils.isotime(after_time, subsecond=True)))
+ utils.isotime(event_issued_before, subsecond=True),
+ utils.isotime(after_time, subsecond=True)))
del (event['issued_before'])
self.assertEqual(sample, event)
@@ -76,7 +77,7 @@ class OSRevokeTests(test_v3.RestfulTestCase, test_v3.JsonHomeTestMixin):
expires_at = provider.default_expire_time()
sample = self._blank_event()
sample['user_id'] = six.text_type(user_id)
- sample['expires_at'] = six.text_type(timeutils.isotime(expires_at))
+ sample['expires_at'] = six.text_type(utils.isotime(expires_at))
before_time = timeutils.utcnow()
self.revoke_api.revoke_by_expiration(user_id, expires_at)
resp = self.get('/OS-REVOKE/events')
diff --git a/keystone-moon/keystone/tests/unit/test_v3_protection.py b/keystone-moon/keystone/tests/unit/test_v3_protection.py
index 2b2c96d1..458c61de 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_protection.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_protection.py
@@ -391,23 +391,18 @@ class IdentityTestPolicySample(test_v3.RestfulTestCase):
# Given a non-admin user token, the token can be used to validate
# itself.
# This is GET /v3/auth/tokens, with X-Auth-Token == X-Subject-Token
- # FIXME(blk-u): This test fails, a user can't validate their own token,
- # see bug 1421825.
auth = self.build_authentication_request(
user_id=self.just_a_user['id'],
password=self.just_a_user['password'])
token = self.get_requested_token(auth)
- # FIXME(blk-u): remove expected_status=403.
self.get('/auth/tokens', token=token,
- headers={'X-Subject-Token': token}, expected_status=403)
+ headers={'X-Subject-Token': token})
def test_user_validate_user_token(self):
# A user can validate one of their own tokens.
# This is GET /v3/auth/tokens
- # FIXME(blk-u): This test fails, a user can't validate their own token,
- # see bug 1421825.
auth = self.build_authentication_request(
user_id=self.just_a_user['id'],
@@ -415,9 +410,8 @@ class IdentityTestPolicySample(test_v3.RestfulTestCase):
token1 = self.get_requested_token(auth)
token2 = self.get_requested_token(auth)
- # FIXME(blk-u): remove expected_status=403.
self.get('/auth/tokens', token=token1,
- headers={'X-Subject-Token': token2}, expected_status=403)
+ headers={'X-Subject-Token': token2})
def test_user_validate_other_user_token_rejected(self):
# A user cannot validate another user's token.
@@ -458,23 +452,18 @@ class IdentityTestPolicySample(test_v3.RestfulTestCase):
# Given a non-admin user token, the token can be used to check
# itself.
# This is HEAD /v3/auth/tokens, with X-Auth-Token == X-Subject-Token
- # FIXME(blk-u): This test fails, a user can't check the same token,
- # see bug 1421825.
auth = self.build_authentication_request(
user_id=self.just_a_user['id'],
password=self.just_a_user['password'])
token = self.get_requested_token(auth)
- # FIXME(blk-u): change to expected_status=200
self.head('/auth/tokens', token=token,
- headers={'X-Subject-Token': token}, expected_status=403)
+ headers={'X-Subject-Token': token}, expected_status=200)
def test_user_check_user_token(self):
# A user can check one of their own tokens.
# This is HEAD /v3/auth/tokens
- # FIXME(blk-u): This test fails, a user can't check the same token,
- # see bug 1421825.
auth = self.build_authentication_request(
user_id=self.just_a_user['id'],
@@ -482,9 +471,8 @@ class IdentityTestPolicySample(test_v3.RestfulTestCase):
token1 = self.get_requested_token(auth)
token2 = self.get_requested_token(auth)
- # FIXME(blk-u): change to expected_status=200
self.head('/auth/tokens', token=token1,
- headers={'X-Subject-Token': token2}, expected_status=403)
+ headers={'X-Subject-Token': token2}, expected_status=200)
def test_user_check_other_user_token_rejected(self):
# A user cannot check another user's token.
@@ -526,23 +514,18 @@ class IdentityTestPolicySample(test_v3.RestfulTestCase):
# Given a non-admin user token, the token can be used to revoke
# itself.
# This is DELETE /v3/auth/tokens, with X-Auth-Token == X-Subject-Token
- # FIXME(blk-u): This test fails, a user can't revoke the same token,
- # see bug 1421825.
auth = self.build_authentication_request(
user_id=self.just_a_user['id'],
password=self.just_a_user['password'])
token = self.get_requested_token(auth)
- # FIXME(blk-u): remove expected_status=403
self.delete('/auth/tokens', token=token,
- headers={'X-Subject-Token': token}, expected_status=403)
+ headers={'X-Subject-Token': token})
def test_user_revoke_user_token(self):
# A user can revoke one of their own tokens.
# This is DELETE /v3/auth/tokens
- # FIXME(blk-u): This test fails, a user can't revoke the same token,
- # see bug 1421825.
auth = self.build_authentication_request(
user_id=self.just_a_user['id'],
@@ -550,9 +533,8 @@ class IdentityTestPolicySample(test_v3.RestfulTestCase):
token1 = self.get_requested_token(auth)
token2 = self.get_requested_token(auth)
- # FIXME(blk-u): remove expected_status=403
self.delete('/auth/tokens', token=token1,
- headers={'X-Subject-Token': token2}, expected_status=403)
+ headers={'X-Subject-Token': token2})
def test_user_revoke_other_user_token_rejected(self):
# A user cannot revoke another user's token.
@@ -591,7 +573,8 @@ class IdentityTestPolicySample(test_v3.RestfulTestCase):
headers={'X-Subject-Token': user_token})
-class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase):
+class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
+ test_v3.AssignmentTestMixin):
"""Test policy enforcement of the sample v3 cloud policy file."""
def setUp(self):
@@ -905,6 +888,141 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase):
self._test_grants('projects', self.project['id'])
+ def test_cloud_admin_list_assignments_of_domain(self):
+ self.auth = self.build_authentication_request(
+ user_id=self.cloud_admin_user['id'],
+ password=self.cloud_admin_user['password'],
+ domain_id=self.admin_domain['id'])
+
+ collection_url = self.build_role_assignment_query_url(
+ domain_id=self.domainA['id'])
+ r = self.get(collection_url, auth=self.auth)
+ self.assertValidRoleAssignmentListResponse(
+ r, expected_length=2, resource_url=collection_url)
+
+ domainA_admin_entity = self.build_role_assignment_entity(
+ domain_id=self.domainA['id'],
+ user_id=self.domain_admin_user['id'],
+ role_id=self.admin_role['id'],
+ inherited_to_projects=False)
+ domainA_user_entity = self.build_role_assignment_entity(
+ domain_id=self.domainA['id'],
+ user_id=self.just_a_user['id'],
+ role_id=self.role['id'],
+ inherited_to_projects=False)
+
+ self.assertRoleAssignmentInListResponse(r, domainA_admin_entity)
+ self.assertRoleAssignmentInListResponse(r, domainA_user_entity)
+
+ def test_domain_admin_list_assignments_of_domain(self):
+ self.auth = self.build_authentication_request(
+ user_id=self.domain_admin_user['id'],
+ password=self.domain_admin_user['password'],
+ domain_id=self.domainA['id'])
+
+ collection_url = self.build_role_assignment_query_url(
+ domain_id=self.domainA['id'])
+ r = self.get(collection_url, auth=self.auth)
+ self.assertValidRoleAssignmentListResponse(
+ r, expected_length=2, resource_url=collection_url)
+
+ domainA_admin_entity = self.build_role_assignment_entity(
+ domain_id=self.domainA['id'],
+ user_id=self.domain_admin_user['id'],
+ role_id=self.admin_role['id'],
+ inherited_to_projects=False)
+ domainA_user_entity = self.build_role_assignment_entity(
+ domain_id=self.domainA['id'],
+ user_id=self.just_a_user['id'],
+ role_id=self.role['id'],
+ inherited_to_projects=False)
+
+ self.assertRoleAssignmentInListResponse(r, domainA_admin_entity)
+ self.assertRoleAssignmentInListResponse(r, domainA_user_entity)
+
+ def test_domain_admin_list_assignments_of_another_domain_failed(self):
+ self.auth = self.build_authentication_request(
+ user_id=self.domain_admin_user['id'],
+ password=self.domain_admin_user['password'],
+ domain_id=self.domainA['id'])
+
+ collection_url = self.build_role_assignment_query_url(
+ domain_id=self.domainB['id'])
+ self.get(collection_url, auth=self.auth, expected_status=403)
+
+ def test_domain_user_list_assignments_of_domain_failed(self):
+ self.auth = self.build_authentication_request(
+ user_id=self.just_a_user['id'],
+ password=self.just_a_user['password'],
+ domain_id=self.domainA['id'])
+
+ collection_url = self.build_role_assignment_query_url(
+ domain_id=self.domainA['id'])
+ self.get(collection_url, auth=self.auth, expected_status=403)
+
+ def test_cloud_admin_list_assignments_of_project(self):
+ self.auth = self.build_authentication_request(
+ user_id=self.cloud_admin_user['id'],
+ password=self.cloud_admin_user['password'],
+ domain_id=self.admin_domain['id'])
+
+ collection_url = self.build_role_assignment_query_url(
+ project_id=self.project['id'])
+ r = self.get(collection_url, auth=self.auth)
+ self.assertValidRoleAssignmentListResponse(
+ r, expected_length=2, resource_url=collection_url)
+
+ project_admin_entity = self.build_role_assignment_entity(
+ project_id=self.project['id'],
+ user_id=self.project_admin_user['id'],
+ role_id=self.admin_role['id'],
+ inherited_to_projects=False)
+ project_user_entity = self.build_role_assignment_entity(
+ project_id=self.project['id'],
+ user_id=self.just_a_user['id'],
+ role_id=self.role['id'],
+ inherited_to_projects=False)
+
+ self.assertRoleAssignmentInListResponse(r, project_admin_entity)
+ self.assertRoleAssignmentInListResponse(r, project_user_entity)
+
+ @tests.utils.wip('waiting on bug #1437407')
+ def test_domain_admin_list_assignments_of_project(self):
+ self.auth = self.build_authentication_request(
+ user_id=self.domain_admin_user['id'],
+ password=self.domain_admin_user['password'],
+ domain_id=self.domainA['id'])
+
+ collection_url = self.build_role_assignment_query_url(
+ project_id=self.project['id'])
+ r = self.get(collection_url, auth=self.auth)
+ self.assertValidRoleAssignmentListResponse(
+ r, expected_length=2, resource_url=collection_url)
+
+ project_admin_entity = self.build_role_assignment_entity(
+ project_id=self.project['id'],
+ user_id=self.project_admin_user['id'],
+ role_id=self.admin_role['id'],
+ inherited_to_projects=False)
+ project_user_entity = self.build_role_assignment_entity(
+ project_id=self.project['id'],
+ user_id=self.just_a_user['id'],
+ role_id=self.role['id'],
+ inherited_to_projects=False)
+
+ self.assertRoleAssignmentInListResponse(r, project_admin_entity)
+ self.assertRoleAssignmentInListResponse(r, project_user_entity)
+
+ def test_domain_user_list_assignments_of_project_failed(self):
+ self.auth = self.build_authentication_request(
+ user_id=self.just_a_user['id'],
+ password=self.just_a_user['password'],
+ domain_id=self.domainA['id'])
+
+ collection_url = self.build_role_assignment_query_url(
+ project_id=self.project['id'])
+ self.get(collection_url, auth=self.auth, expected_status=403)
+
def test_cloud_admin(self):
self.auth = self.build_authentication_request(
user_id=self.domain_admin_user['id'],
@@ -921,6 +1039,14 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase):
self._test_domain_management()
+ def test_domain_admin_get_domain(self):
+ self.auth = self.build_authentication_request(
+ user_id=self.domain_admin_user['id'],
+ password=self.domain_admin_user['password'],
+ domain_id=self.domainA['id'])
+ entity_url = '/domains/%s' % self.domainA['id']
+ self.get(entity_url, auth=self.auth, expected_status=200)
+
def test_list_user_credentials(self):
self.credential_user = self.new_credential_ref(self.just_a_user['id'])
self.credential_api.create_credential(self.credential_user['id'],
@@ -982,23 +1108,18 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase):
# Given a non-admin user token, the token can be used to validate
# itself.
# This is GET /v3/auth/tokens, with X-Auth-Token == X-Subject-Token
- # FIXME(blk-u): This test fails, a user can't validate their own token,
- # see bug 1421825.
auth = self.build_authentication_request(
user_id=self.just_a_user['id'],
password=self.just_a_user['password'])
token = self.get_requested_token(auth)
- # FIXME(blk-u): remove expected_status=403.
self.get('/auth/tokens', token=token,
- headers={'X-Subject-Token': token}, expected_status=403)
+ headers={'X-Subject-Token': token})
def test_user_validate_user_token(self):
# A user can validate one of their own tokens.
# This is GET /v3/auth/tokens
- # FIXME(blk-u): This test fails, a user can't validate their own token,
- # see bug 1421825.
auth = self.build_authentication_request(
user_id=self.just_a_user['id'],
@@ -1006,9 +1127,8 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase):
token1 = self.get_requested_token(auth)
token2 = self.get_requested_token(auth)
- # FIXME(blk-u): remove expected_status=403.
self.get('/auth/tokens', token=token1,
- headers={'X-Subject-Token': token2}, expected_status=403)
+ headers={'X-Subject-Token': token2})
def test_user_validate_other_user_token_rejected(self):
# A user cannot validate another user's token.
diff --git a/keystone-moon/keystone/tests/unit/test_validation.py b/keystone-moon/keystone/tests/unit/test_validation.py
index f83cabcb..f7a224a0 100644
--- a/keystone-moon/keystone/tests/unit/test_validation.py
+++ b/keystone-moon/keystone/tests/unit/test_validation.py
@@ -13,6 +13,7 @@
import uuid
+import six
import testtools
from keystone.assignment import schema as assignment_schema
@@ -24,8 +25,10 @@ from keystone.contrib.endpoint_filter import schema as endpoint_filter_schema
from keystone.contrib.federation import schema as federation_schema
from keystone.credential import schema as credential_schema
from keystone import exception
+from keystone.identity import schema as identity_schema
from keystone.policy import schema as policy_schema
from keystone.resource import schema as resource_schema
+from keystone.tests import unit
from keystone.trust import schema as trust_schema
"""Example model to validate create requests against. Assume that this is
@@ -96,7 +99,84 @@ _VALID_FILTERS = [{'interface': 'admin'},
_INVALID_FILTERS = ['some string', 1, 0, True, False]
-class EntityValidationTestCase(testtools.TestCase):
+class ValidatedDecoratorTests(unit.BaseTestCase):
+
+ entity_schema = {
+ 'type': 'object',
+ 'properties': {
+ 'name': parameter_types.name,
+ },
+ 'required': ['name'],
+ }
+
+ valid_entity = {
+ 'name': uuid.uuid4().hex,
+ }
+
+ invalid_entity = {}
+
+ @validation.validated(entity_schema, 'entity')
+ def do_something(self, entity):
+ pass
+
+ @validation.validated(entity_create, 'entity')
+ def create_entity(self, entity):
+ pass
+
+ @validation.validated(entity_update, 'entity')
+ def update_entity(self, entity_id, entity):
+ pass
+
+ def _assert_call_entity_method_fails(self, method, *args, **kwargs):
+ e = self.assertRaises(exception.ValidationError, method,
+ *args, **kwargs)
+
+ self.assertIn('Expecting to find entity in request body',
+ six.text_type(e))
+
+ def test_calling_with_valid_entity_kwarg_succeeds(self):
+ self.do_something(entity=self.valid_entity)
+
+ def test_calling_with_invalid_entity_kwarg_fails(self):
+ self.assertRaises(exception.ValidationError,
+ self.do_something,
+ entity=self.invalid_entity)
+
+ def test_calling_with_valid_entity_arg_succeeds(self):
+ self.do_something(self.valid_entity)
+
+ def test_calling_with_invalid_entity_arg_fails(self):
+ self.assertRaises(exception.ValidationError,
+ self.do_something,
+ self.invalid_entity)
+
+ def test_using_the_wrong_name_with_the_decorator_fails(self):
+ with testtools.ExpectedException(TypeError):
+ @validation.validated(self.entity_schema, 'entity_')
+ def function(entity):
+ pass
+
+ def test_create_entity_no_request_body_with_decorator(self):
+ """Test the case when request body is not provided."""
+ self._assert_call_entity_method_fails(self.create_entity)
+
+ def test_create_entity_empty_request_body_with_decorator(self):
+ """Test the case when the client passes in an empty entity reference."""
+ self._assert_call_entity_method_fails(self.create_entity, entity={})
+
+ def test_update_entity_no_request_body_with_decorator(self):
+ """Test the case when request body is not provided."""
+ self._assert_call_entity_method_fails(self.update_entity,
+ uuid.uuid4().hex)
+
+ def test_update_entity_empty_request_body_with_decorator(self):
+ """Test the case when the client passes in an empty entity reference."""
+ self._assert_call_entity_method_fails(self.update_entity,
+ uuid.uuid4().hex,
+ entity={})
+
+
+class EntityValidationTestCase(unit.BaseTestCase):
def setUp(self):
super(EntityValidationTestCase, self).setUp()
@@ -226,7 +306,7 @@ class EntityValidationTestCase(testtools.TestCase):
def test_create_entity_with_invalid_id_strings(self):
"""Exception raised when using invalid id strings."""
long_string = 'A' * 65
- invalid_id_strings = ['', long_string, 'this,should,fail']
+ invalid_id_strings = ['', long_string]
for invalid_id in invalid_id_strings:
request_to_validate = {'name': self.resource_name,
'id_string': invalid_id}
@@ -299,7 +379,7 @@ class EntityValidationTestCase(testtools.TestCase):
request_to_validate)
-class ProjectValidationTestCase(testtools.TestCase):
+class ProjectValidationTestCase(unit.BaseTestCase):
"""Test for V3 Project API validation."""
def setUp(self):
@@ -426,7 +506,7 @@ class ProjectValidationTestCase(testtools.TestCase):
request_to_validate)
-class DomainValidationTestCase(testtools.TestCase):
+class DomainValidationTestCase(unit.BaseTestCase):
"""Test for V3 Domain API validation."""
def setUp(self):
@@ -524,7 +604,7 @@ class DomainValidationTestCase(testtools.TestCase):
request_to_validate)
-class RoleValidationTestCase(testtools.TestCase):
+class RoleValidationTestCase(unit.BaseTestCase):
"""Test for V3 Role API validation."""
def setUp(self):
@@ -578,7 +658,7 @@ class RoleValidationTestCase(testtools.TestCase):
request_to_validate)
-class PolicyValidationTestCase(testtools.TestCase):
+class PolicyValidationTestCase(unit.BaseTestCase):
"""Test for V3 Policy API validation."""
def setUp(self):
@@ -653,7 +733,7 @@ class PolicyValidationTestCase(testtools.TestCase):
request_to_validate)
-class CredentialValidationTestCase(testtools.TestCase):
+class CredentialValidationTestCase(unit.BaseTestCase):
"""Test for V3 Credential API validation."""
def setUp(self):
@@ -770,7 +850,7 @@ class CredentialValidationTestCase(testtools.TestCase):
self.update_credential_validator.validate(request_to_validate)
-class RegionValidationTestCase(testtools.TestCase):
+class RegionValidationTestCase(unit.BaseTestCase):
"""Test for V3 Region API validation."""
def setUp(self):
@@ -804,6 +884,14 @@ class RegionValidationTestCase(testtools.TestCase):
'parent_region_id': uuid.uuid4().hex}
self.create_region_validator.validate(request_to_validate)
+ def test_validate_region_create_fails_with_invalid_region_id(self):
+ """Exception raised when passing invalid `id` in request."""
+ request_to_validate = {'id': 1234,
+ 'description': 'US East Region'}
+ self.assertRaises(exception.SchemaValidationError,
+ self.create_region_validator.validate,
+ request_to_validate)
+
def test_validate_region_create_succeeds_with_extra_parameters(self):
"""Validate create region request with extra values."""
request_to_validate = {'other_attr': uuid.uuid4().hex}
@@ -830,7 +918,7 @@ class RegionValidationTestCase(testtools.TestCase):
request_to_validate)
-class ServiceValidationTestCase(testtools.TestCase):
+class ServiceValidationTestCase(unit.BaseTestCase):
"""Test for V3 Service API validation."""
def setUp(self):
@@ -985,7 +1073,7 @@ class ServiceValidationTestCase(testtools.TestCase):
request_to_validate)
-class EndpointValidationTestCase(testtools.TestCase):
+class EndpointValidationTestCase(unit.BaseTestCase):
"""Test for V3 Endpoint API validation."""
def setUp(self):
@@ -1096,6 +1184,26 @@ class EndpointValidationTestCase(testtools.TestCase):
self.create_endpoint_validator.validate,
request_to_validate)
+ def test_validate_endpoint_create_fails_with_invalid_region_id(self):
+ """Exception raised when passing invalid `region(_id)` in request."""
+ request_to_validate = {'interface': 'admin',
+ 'region_id': 1234,
+ 'service_id': uuid.uuid4().hex,
+ 'url': 'https://service.example.com:5000/'}
+
+ self.assertRaises(exception.SchemaValidationError,
+ self.create_endpoint_validator.validate,
+ request_to_validate)
+
+ request_to_validate = {'interface': 'admin',
+ 'region': 1234,
+ 'service_id': uuid.uuid4().hex,
+ 'url': 'https://service.example.com:5000/'}
+
+ self.assertRaises(exception.SchemaValidationError,
+ self.create_endpoint_validator.validate,
+ request_to_validate)
+
def test_validate_endpoint_update_fails_with_invalid_enabled(self):
"""Exception raised when `enabled` is boolean-like value."""
for invalid_enabled in _INVALID_ENABLED_FORMATS:
@@ -1163,8 +1271,28 @@ class EndpointValidationTestCase(testtools.TestCase):
self.update_endpoint_validator.validate,
request_to_validate)
+ def test_validate_endpoint_update_fails_with_invalid_region_id(self):
+ """Exception raised when passing invalid `region(_id)` in request."""
+ request_to_validate = {'interface': 'admin',
+ 'region_id': 1234,
+ 'service_id': uuid.uuid4().hex,
+ 'url': 'https://service.example.com:5000/'}
-class EndpointGroupValidationTestCase(testtools.TestCase):
+ self.assertRaises(exception.SchemaValidationError,
+ self.update_endpoint_validator.validate,
+ request_to_validate)
+
+ request_to_validate = {'interface': 'admin',
+ 'region': 1234,
+ 'service_id': uuid.uuid4().hex,
+ 'url': 'https://service.example.com:5000/'}
+
+ self.assertRaises(exception.SchemaValidationError,
+ self.update_endpoint_validator.validate,
+ request_to_validate)
+
+
+class EndpointGroupValidationTestCase(unit.BaseTestCase):
"""Test for V3 Endpoint Group API validation."""
def setUp(self):
@@ -1269,7 +1397,7 @@ class EndpointGroupValidationTestCase(testtools.TestCase):
request_to_validate)
-class TrustValidationTestCase(testtools.TestCase):
+class TrustValidationTestCase(unit.BaseTestCase):
"""Test for V3 Trust API validation."""
_valid_roles = ['member', uuid.uuid4().hex, str(uuid.uuid4())]
@@ -1360,6 +1488,13 @@ class TrustValidationTestCase(testtools.TestCase):
'remaining_uses': 2}
self.create_trust_validator.validate(request_to_validate)
+ def test_validate_trust_with_period_in_user_id_string(self):
+ """Validate trust request with a period in the user id string."""
+ request_to_validate = {'trustor_user_id': 'john.smith',
+ 'trustee_user_id': 'joe.developer',
+ 'impersonation': False}
+ self.create_trust_validator.validate(request_to_validate)
+
def test_validate_trust_with_invalid_expires_at_fails(self):
"""Validate trust request with invalid `expires_at` fails."""
request_to_validate = {'trustor_user_id': uuid.uuid4().hex,
@@ -1399,7 +1534,7 @@ class TrustValidationTestCase(testtools.TestCase):
self.create_trust_validator.validate(request_to_validate)
-class ServiceProviderValidationTestCase(testtools.TestCase):
+class ServiceProviderValidationTestCase(unit.BaseTestCase):
"""Test for V3 Service Provider API validation."""
def setUp(self):
@@ -1561,3 +1696,182 @@ class ServiceProviderValidationTestCase(testtools.TestCase):
self.assertRaises(exception.SchemaValidationError,
self.update_sp_validator.validate,
request_to_validate)
+
+
+class UserValidationTestCase(unit.BaseTestCase):
+ """Test for V3 User API validation."""
+
+ def setUp(self):
+ super(UserValidationTestCase, self).setUp()
+
+ self.user_name = uuid.uuid4().hex
+
+ create = identity_schema.user_create
+ update = identity_schema.user_update
+ self.create_user_validator = validators.SchemaValidator(create)
+ self.update_user_validator = validators.SchemaValidator(update)
+
+ def test_validate_user_create_request_succeeds(self):
+ """Test that validating a user create request succeeds."""
+ request_to_validate = {'name': self.user_name}
+ self.create_user_validator.validate(request_to_validate)
+
+ def test_validate_user_create_with_all_valid_parameters_succeeds(self):
+ """Test that validating a user create request succeeds."""
+ request_to_validate = {'name': self.user_name,
+ 'default_project_id': uuid.uuid4().hex,
+ 'domain_id': uuid.uuid4().hex,
+ 'description': uuid.uuid4().hex,
+ 'enabled': True,
+ 'email': uuid.uuid4().hex,
+ 'password': uuid.uuid4().hex}
+ self.create_user_validator.validate(request_to_validate)
+
+ def test_validate_user_create_fails_without_name(self):
+ """Exception raised when validating a user without name."""
+ request_to_validate = {'email': uuid.uuid4().hex}
+ self.assertRaises(exception.SchemaValidationError,
+ self.create_user_validator.validate,
+ request_to_validate)
+
+ def test_validate_user_create_fails_with_name_of_zero_length(self):
+ """Exception raised when validating a username with length of zero."""
+ request_to_validate = {'name': ''}
+ self.assertRaises(exception.SchemaValidationError,
+ self.create_user_validator.validate,
+ request_to_validate)
+
+ def test_validate_user_create_fails_with_name_of_wrong_type(self):
+ """Exception raised when validating a username of wrong type."""
+ request_to_validate = {'name': True}
+ self.assertRaises(exception.SchemaValidationError,
+ self.create_user_validator.validate,
+ request_to_validate)
+
+ def test_validate_user_create_succeeds_with_valid_enabled_formats(self):
+ """Validate acceptable enabled formats in create user requests."""
+ for enabled in _VALID_ENABLED_FORMATS:
+ request_to_validate = {'name': self.user_name,
+ 'enabled': enabled}
+ self.create_user_validator.validate(request_to_validate)
+
+ def test_validate_user_create_fails_with_invalid_enabled_formats(self):
+ """Exception raised when enabled is not an acceptable format."""
+ for invalid_enabled in _INVALID_ENABLED_FORMATS:
+ request_to_validate = {'name': self.user_name,
+ 'enabled': invalid_enabled}
+ self.assertRaises(exception.SchemaValidationError,
+ self.create_user_validator.validate,
+ request_to_validate)
+
+ def test_validate_user_create_succeeds_with_extra_attributes(self):
+ """Validate extra parameters on user create requests."""
+ request_to_validate = {'name': self.user_name,
+ 'other_attr': uuid.uuid4().hex}
+ self.create_user_validator.validate(request_to_validate)
+
+ def test_validate_user_create_succeeds_with_password_of_zero_length(self):
+ """Validate empty password on user create requests."""
+ request_to_validate = {'name': self.user_name,
+ 'password': ''}
+ self.create_user_validator.validate(request_to_validate)
+
+ def test_validate_user_create_succeeds_with_null_password(self):
+ """Validate that password is nullable on create user."""
+ request_to_validate = {'name': self.user_name,
+ 'password': None}
+ self.create_user_validator.validate(request_to_validate)
+
+ def test_validate_user_create_fails_with_invalid_password_type(self):
+ """Exception raised when user password is of the wrong type."""
+ request_to_validate = {'name': self.user_name,
+ 'password': True}
+ self.assertRaises(exception.SchemaValidationError,
+ self.create_user_validator.validate,
+ request_to_validate)
+
+ def test_validate_user_create_succeeds_with_null_description(self):
+ """Validate that description can be nullable on create user."""
+ request_to_validate = {'name': self.user_name,
+ 'description': None}
+ self.create_user_validator.validate(request_to_validate)
+
+ def test_validate_user_update_succeeds(self):
+ """Validate an update user request."""
+ request_to_validate = {'email': uuid.uuid4().hex}
+ self.update_user_validator.validate(request_to_validate)
+
+ def test_validate_user_update_fails_with_no_parameters(self):
+ """Exception raised when updating nothing."""
+ request_to_validate = {}
+ self.assertRaises(exception.SchemaValidationError,
+ self.update_user_validator.validate,
+ request_to_validate)
+
+ def test_validate_user_update_succeeds_with_extra_parameters(self):
+ """Validate user update requests with extra parameters."""
+ request_to_validate = {'other_attr': uuid.uuid4().hex}
+ self.update_user_validator.validate(request_to_validate)
+
+
+class GroupValidationTestCase(unit.BaseTestCase):
+ """Test for V3 Group API validation."""
+
+ def setUp(self):
+ super(GroupValidationTestCase, self).setUp()
+
+ self.group_name = uuid.uuid4().hex
+
+ create = identity_schema.group_create
+ update = identity_schema.group_update
+ self.create_group_validator = validators.SchemaValidator(create)
+ self.update_group_validator = validators.SchemaValidator(update)
+
+ def test_validate_group_create_succeeds(self):
+ """Validate create group requests."""
+ request_to_validate = {'name': self.group_name}
+ self.create_group_validator.validate(request_to_validate)
+
+ def test_validate_group_create_succeeds_with_all_parameters(self):
+ """Validate create group requests with all parameters."""
+ request_to_validate = {'name': self.group_name,
+ 'description': uuid.uuid4().hex,
+ 'domain_id': uuid.uuid4().hex}
+ self.create_group_validator.validate(request_to_validate)
+
+ def test_validate_group_create_fails_without_group_name(self):
+ """Exception raised when group name is not provided in request."""
+ request_to_validate = {'description': uuid.uuid4().hex}
+ self.assertRaises(exception.SchemaValidationError,
+ self.create_group_validator.validate,
+ request_to_validate)
+
+ def test_validate_group_create_fails_when_group_name_is_too_short(self):
+ """Exception raised when the group name length is equal to zero."""
+ request_to_validate = {'name': ''}
+ self.assertRaises(exception.SchemaValidationError,
+ self.create_group_validator.validate,
+ request_to_validate)
+
+ def test_validate_group_create_succeeds_with_extra_parameters(self):
+ """Validate extra attributes on group create requests."""
+ request_to_validate = {'name': self.group_name,
+ 'other_attr': uuid.uuid4().hex}
+ self.create_group_validator.validate(request_to_validate)
+
+ def test_validate_group_update_succeeds(self):
+ """Validate group update requests."""
+ request_to_validate = {'description': uuid.uuid4().hex}
+ self.update_group_validator.validate(request_to_validate)
+
+ def test_validate_group_update_fails_with_no_parameters(self):
+ """Exception raised when no parameters passed in on update."""
+ request_to_validate = {}
+ self.assertRaises(exception.SchemaValidationError,
+ self.update_group_validator.validate,
+ request_to_validate)
+
+ def test_validate_group_update_succeeds_with_extra_parameters(self):
+ """Validate group update requests with extra parameters."""
+ request_to_validate = {'other_attr': uuid.uuid4().hex}
+ self.update_group_validator.validate(request_to_validate)
diff --git a/keystone-moon/keystone/tests/unit/test_versions.py b/keystone-moon/keystone/tests/unit/test_versions.py
index 6fe692ad..7f722f94 100644
--- a/keystone-moon/keystone/tests/unit/test_versions.py
+++ b/keystone-moon/keystone/tests/unit/test_versions.py
@@ -25,6 +25,7 @@ from testtools import matchers as tt_matchers
from keystone.common import json_home
from keystone import controllers
from keystone.tests import unit as tests
+from keystone.tests.unit import utils
CONF = cfg.CONF
@@ -71,9 +72,9 @@ v3_MEDIA_TYPES = [
]
v3_EXPECTED_RESPONSE = {
- "id": "v3.0",
+ "id": "v3.4",
"status": "stable",
- "updated": "2013-03-06T00:00:00Z",
+ "updated": "2015-03-30T00:00:00Z",
"links": [
{
"rel": "self",
@@ -161,7 +162,8 @@ ENDPOINT_GROUP_ID_PARAMETER_RELATION = (
BASE_IDP_PROTOCOL = '/OS-FEDERATION/identity_providers/{idp_id}/protocols'
BASE_EP_POLICY = '/policies/{policy_id}/OS-ENDPOINT-POLICY'
-BASE_EP_FILTER = '/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}'
+BASE_EP_FILTER_PREFIX = '/OS-EP-FILTER'
+BASE_EP_FILTER = BASE_EP_FILTER_PREFIX + '/endpoint_groups/{endpoint_group_id}'
BASE_ACCESS_TOKEN = (
'/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}')
@@ -352,6 +354,8 @@ V3_JSON_HOME_RESOURCES_INHERIT_DISABLED = {
'href': '/OS-FEDERATION/projects'},
_build_federation_rel(resource_name='saml2'): {
'href': '/auth/OS-FEDERATION/saml2'},
+ _build_federation_rel(resource_name='ecp'): {
+ 'href': '/auth/OS-FEDERATION/saml2/ecp'},
_build_federation_rel(resource_name='metadata'): {
'href': '/OS-FEDERATION/saml2/metadata'},
_build_federation_rel(resource_name='identity_providers'): {
@@ -474,6 +478,12 @@ V3_JSON_HOME_RESOURCES_INHERIT_DISABLED = {
'href-template': BASE_EP_FILTER + '/endpoints',
'href-vars': {'endpoint_group_id':
ENDPOINT_GROUP_ID_PARAMETER_RELATION, }},
+ _build_ep_filter_rel(resource_name='project_endpoint_groups'):
+ {
+ 'href-template': (BASE_EP_FILTER_PREFIX + '/projects/{project_id}' +
+ '/endpoint_groups'),
+ 'href-vars': {'project_id':
+ json_home.Parameters.PROJECT_ID, }},
_build_ep_filter_rel(resource_name='project_endpoint'):
{
'href-template': ('/OS-EP-FILTER/projects/{project_id}'
@@ -635,9 +645,11 @@ class VersionTestCase(tests.TestCase):
def config_overrides(self):
super(VersionTestCase, self).config_overrides()
- port = random.randint(10000, 30000)
- self.config_fixture.config(group='eventlet_server', public_port=port,
- admin_port=port)
+ admin_port = random.randint(10000, 30000)
+ public_port = random.randint(40000, 60000)
+ self.config_fixture.config(group='eventlet_server',
+ public_port=public_port,
+ admin_port=admin_port)
def _paste_in_port(self, response, port):
for link in response['links']:
@@ -651,7 +663,7 @@ class VersionTestCase(tests.TestCase):
data = jsonutils.loads(resp.body)
expected = VERSIONS_RESPONSE
for version in expected['versions']['values']:
- if version['id'] == 'v3.0':
+ if version['id'].startswith('v3'):
self._paste_in_port(
version, 'http://localhost:%s/v3/' %
CONF.eventlet_server.public_port)
@@ -668,7 +680,7 @@ class VersionTestCase(tests.TestCase):
data = jsonutils.loads(resp.body)
expected = VERSIONS_RESPONSE
for version in expected['versions']['values']:
- if version['id'] == 'v3.0':
+ if version['id'].startswith('v3'):
self._paste_in_port(
version, 'http://localhost:%s/v3/' %
CONF.eventlet_server.admin_port)
@@ -689,7 +701,7 @@ class VersionTestCase(tests.TestCase):
expected = VERSIONS_RESPONSE
for version in expected['versions']['values']:
# localhost happens to be the site url for tests
- if version['id'] == 'v3.0':
+ if version['id'].startswith('v3'):
self._paste_in_port(
version, 'http://localhost/v3/')
elif version['id'] == 'v2.0':
@@ -741,8 +753,9 @@ class VersionTestCase(tests.TestCase):
CONF.eventlet_server.public_port)
self.assertEqual(expected, data)
+ @utils.wip('waiting on bug #1381961')
def test_admin_version_v3(self):
- client = tests.TestClient(self.public_app)
+ client = tests.TestClient(self.admin_app)
resp = client.get('/v3/')
self.assertEqual(200, resp.status_int)
data = jsonutils.loads(resp.body)
@@ -931,9 +944,11 @@ class VersionSingleAppTestCase(tests.TestCase):
def config_overrides(self):
super(VersionSingleAppTestCase, self).config_overrides()
- port = random.randint(10000, 30000)
- self.config_fixture.config(group='eventlet_server', public_port=port,
- admin_port=port)
+ admin_port = random.randint(10000, 30000)
+ public_port = random.randint(40000, 60000)
+ self.config_fixture.config(group='eventlet_server',
+ public_port=public_port,
+ admin_port=admin_port)
def _paste_in_port(self, response, port):
for link in response['links']:
@@ -941,6 +956,11 @@ class VersionSingleAppTestCase(tests.TestCase):
link['href'] = port
def _test_version(self, app_name):
+ def app_port():
+ if app_name == 'admin':
+ return CONF.eventlet_server.admin_port
+ else:
+ return CONF.eventlet_server.public_port
app = self.loadapp('keystone', app_name)
client = tests.TestClient(app)
resp = client.get('/')
@@ -948,14 +968,12 @@ class VersionSingleAppTestCase(tests.TestCase):
data = jsonutils.loads(resp.body)
expected = VERSIONS_RESPONSE
for version in expected['versions']['values']:
- if version['id'] == 'v3.0':
+ if version['id'].startswith('v3'):
self._paste_in_port(
- version, 'http://localhost:%s/v3/' %
- CONF.eventlet_server.public_port)
+ version, 'http://localhost:%s/v3/' % app_port())
elif version['id'] == 'v2.0':
self._paste_in_port(
- version, 'http://localhost:%s/v2.0/' %
- CONF.eventlet_server.public_port)
+ version, 'http://localhost:%s/v2.0/' % app_port())
self.assertThat(data, _VersionsEqual(expected))
def test_public(self):
@@ -978,9 +996,11 @@ class VersionInheritEnabledTestCase(tests.TestCase):
def config_overrides(self):
super(VersionInheritEnabledTestCase, self).config_overrides()
- port = random.randint(10000, 30000)
- self.config_fixture.config(group='eventlet_server', public_port=port,
- admin_port=port)
+ admin_port = random.randint(10000, 30000)
+ public_port = random.randint(40000, 60000)
+ self.config_fixture.config(group='eventlet_server',
+ public_port=public_port,
+ admin_port=admin_port)
self.config_fixture.config(group='os_inherit', enabled=True)
@@ -1021,7 +1041,7 @@ class VersionBehindSslTestCase(tests.TestCase):
def _get_expected(self, host):
expected = VERSIONS_RESPONSE
for version in expected['versions']['values']:
- if version['id'] == 'v3.0':
+ if version['id'].startswith('v3'):
self._paste_in_port(version, host + 'v3/')
elif version['id'] == 'v2.0':
self._paste_in_port(version, host + 'v2.0/')
diff --git a/keystone-moon/keystone/tests/unit/test_wsgi.py b/keystone-moon/keystone/tests/unit/test_wsgi.py
index 1785dd00..62156bd5 100644
--- a/keystone-moon/keystone/tests/unit/test_wsgi.py
+++ b/keystone-moon/keystone/tests/unit/test_wsgi.py
@@ -1,3 +1,5 @@
+# encoding: utf-8
+#
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -16,6 +18,7 @@ import gettext
import socket
import uuid
+import eventlet
import mock
import oslo_i18n
from oslo_serialization import jsonutils
@@ -49,6 +52,22 @@ class FakeAttributeCheckerApp(wsgi.Application):
self._require_attributes(ref, attr)
+class RouterTest(tests.TestCase):
+ def setUp(self):
+ self.router = wsgi.RoutersBase()
+ super(RouterTest, self).setUp()
+
+ def test_invalid_status(self):
+ fake_mapper = uuid.uuid4().hex
+ fake_controller = uuid.uuid4().hex
+ fake_path = uuid.uuid4().hex
+ fake_rel = uuid.uuid4().hex
+ self.assertRaises(exception.Error,
+ self.router._add_resource,
+ fake_mapper, fake_controller, fake_path, fake_rel,
+ status=uuid.uuid4().hex)
+
+
class BaseWSGITest(tests.TestCase):
def setUp(self):
self.app = FakeApp()
@@ -185,6 +204,26 @@ class ApplicationTest(BaseWSGITest):
self.assertEqual(401, resp.status_int)
+ def test_improperly_encoded_params(self):
+ class FakeApp(wsgi.Application):
+ def index(self, context):
+ return context['query_string']
+ # this is high bit set ASCII, copy & pasted from Windows.
+ # aka code page 1252. It is not valid UTF8.
+ req = self._make_request(url='/?name=nonexit%E8nt')
+ self.assertRaises(exception.ValidationError, req.get_response,
+ FakeApp())
+
+ def test_properly_encoded_params(self):
+ class FakeApp(wsgi.Application):
+ def index(self, context):
+ return context['query_string']
+ # nonexitènt encoded as UTF-8
+ req = self._make_request(url='/?name=nonexit%C3%A8nt')
+ resp = req.get_response(FakeApp())
+ self.assertEqual({'name': u'nonexit\xe8nt'},
+ jsonutils.loads(resp.body))
+
class ExtensionRouterTest(BaseWSGITest):
def test_extensionrouter_local_config(self):
@@ -425,3 +464,43 @@ class ServerTest(tests.TestCase):
1)
self.assertTrue(mock_listen.called)
+
+ def test_client_socket_timeout(self):
+ # mocking server method of eventlet.wsgi to check it is called with
+ # configured 'client_socket_timeout' value.
+ for socket_timeout in range(1, 10):
+ self.config_fixture.config(group='eventlet_server',
+ client_socket_timeout=socket_timeout)
+ server = environment.Server(mock.MagicMock(), host=self.host,
+ port=self.port)
+ with mock.patch.object(eventlet.wsgi, 'server') as mock_server:
+ fake_application = uuid.uuid4().hex
+ fake_socket = uuid.uuid4().hex
+ server._run(fake_application, fake_socket)
+ mock_server.assert_called_once_with(
+ fake_socket,
+ fake_application,
+ debug=mock.ANY,
+ socket_timeout=socket_timeout,
+ log=mock.ANY,
+ keepalive=mock.ANY)
+
+ def test_wsgi_keep_alive(self):
+ # mocking server method of eventlet.wsgi to check it is called with
+ # configured 'wsgi_keep_alive' value.
+ wsgi_keepalive = False
+ self.config_fixture.config(group='eventlet_server',
+ wsgi_keep_alive=wsgi_keepalive)
+
+ server = environment.Server(mock.MagicMock(), host=self.host,
+ port=self.port)
+ with mock.patch.object(eventlet.wsgi, 'server') as mock_server:
+ fake_application = uuid.uuid4().hex
+ fake_socket = uuid.uuid4().hex
+ server._run(fake_application, fake_socket)
+ mock_server.assert_called_once_with(fake_socket,
+ fake_application,
+ debug=mock.ANY,
+ socket_timeout=mock.ANY,
+ log=mock.ANY,
+ keepalive=wsgi_keepalive)
diff --git a/keystone-moon/keystone/tests/unit/tests/test_core.py b/keystone-moon/keystone/tests/unit/tests/test_core.py
index 86c91a8d..2de51c52 100644
--- a/keystone-moon/keystone/tests/unit/tests/test_core.py
+++ b/keystone-moon/keystone/tests/unit/tests/test_core.py
@@ -47,16 +47,7 @@ class TestTestCase(tests.TestCase):
lambda: warnings.warn('test sa warning error', exc.SAWarning),
matchers.raises(exc.SAWarning))
- def test_deprecations(self):
- # If any deprecation warnings occur during testing it's raised as
- # exception.
-
- def use_deprecated():
- # DeprecationWarning: BaseException.message has been deprecated as
- # of Python 2.6
- try:
- raise Exception('something')
- except Exception as e:
- e.message
-
- self.assertThat(use_deprecated, matchers.raises(DeprecationWarning))
+ def test_deprecation_warnings_are_raised_as_exceptions_in_tests(self):
+ self.assertThat(
+ lambda: warnings.warn('this is deprecated', DeprecationWarning),
+ matchers.raises(DeprecationWarning))
diff --git a/keystone-moon/keystone/tests/unit/token/test_fernet_provider.py b/keystone-moon/keystone/tests/unit/token/test_fernet_provider.py
index 23fc0214..4101369c 100644
--- a/keystone-moon/keystone/tests/unit/token/test_fernet_provider.py
+++ b/keystone-moon/keystone/tests/unit/token/test_fernet_provider.py
@@ -11,17 +11,21 @@
# under the License.
import datetime
+import hashlib
+import os
import uuid
from oslo_utils import timeutils
from keystone.common import config
+from keystone.common import utils
from keystone import exception
from keystone.tests import unit as tests
from keystone.tests.unit import ksfixtures
from keystone.token import provider
from keystone.token.providers import fernet
from keystone.token.providers.fernet import token_formatters
+from keystone.token.providers.fernet import utils as fernet_utils
CONF = config.CONF
@@ -33,21 +37,21 @@ class TestFernetTokenProvider(tests.TestCase):
self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
self.provider = fernet.Provider()
- def test_get_token_id_raises_not_implemented(self):
- """Test that an exception is raised when calling _get_token_id."""
- token_data = {}
- self.assertRaises(exception.NotImplemented,
- self.provider._get_token_id, token_data)
+ def test_supports_bind_authentication_returns_false(self):
+ self.assertFalse(self.provider._supports_bind_authentication)
- def test_invalid_v3_token_raises_401(self):
+ def test_needs_persistence_returns_false(self):
+ self.assertFalse(self.provider.needs_persistence())
+
+ def test_invalid_v3_token_raises_404(self):
self.assertRaises(
- exception.Unauthorized,
+ exception.TokenNotFound,
self.provider.validate_v3_token,
uuid.uuid4().hex)
- def test_invalid_v2_token_raises_401(self):
+ def test_invalid_v2_token_raises_404(self):
self.assertRaises(
- exception.Unauthorized,
+ exception.TokenNotFound,
self.provider.validate_v2_token,
uuid.uuid4().hex)
@@ -69,7 +73,7 @@ class TestPayloads(tests.TestCase):
def test_time_string_to_int_conversions(self):
payload_cls = token_formatters.BasePayload
- expected_time_str = timeutils.isotime()
+ expected_time_str = utils.isotime(subsecond=True)
time_obj = timeutils.parse_isotime(expected_time_str)
expected_time_int = (
(timeutils.normalize_time(time_obj) -
@@ -86,7 +90,7 @@ class TestPayloads(tests.TestCase):
def test_unscoped_payload(self):
exp_user_id = uuid.uuid4().hex
exp_methods = ['password']
- exp_expires_at = timeutils.isotime(timeutils.utcnow())
+ exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
exp_audit_ids = [provider.random_urlsafe_str()]
payload = token_formatters.UnscopedPayload.assemble(
@@ -104,7 +108,7 @@ class TestPayloads(tests.TestCase):
exp_user_id = uuid.uuid4().hex
exp_methods = ['password']
exp_project_id = uuid.uuid4().hex
- exp_expires_at = timeutils.isotime(timeutils.utcnow())
+ exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
exp_audit_ids = [provider.random_urlsafe_str()]
payload = token_formatters.ProjectScopedPayload.assemble(
@@ -124,7 +128,7 @@ class TestPayloads(tests.TestCase):
exp_user_id = uuid.uuid4().hex
exp_methods = ['password']
exp_domain_id = uuid.uuid4().hex
- exp_expires_at = timeutils.isotime(timeutils.utcnow())
+ exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
exp_audit_ids = [provider.random_urlsafe_str()]
payload = token_formatters.DomainScopedPayload.assemble(
@@ -144,7 +148,7 @@ class TestPayloads(tests.TestCase):
exp_user_id = uuid.uuid4().hex
exp_methods = ['password']
exp_domain_id = CONF.identity.default_domain_id
- exp_expires_at = timeutils.isotime(timeutils.utcnow())
+ exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
exp_audit_ids = [provider.random_urlsafe_str()]
payload = token_formatters.DomainScopedPayload.assemble(
@@ -164,7 +168,128 @@ class TestPayloads(tests.TestCase):
exp_user_id = uuid.uuid4().hex
exp_methods = ['password']
exp_project_id = uuid.uuid4().hex
- exp_expires_at = timeutils.isotime(timeutils.utcnow())
+ exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
+ exp_audit_ids = [provider.random_urlsafe_str()]
+ exp_trust_id = uuid.uuid4().hex
+
+ payload = token_formatters.TrustScopedPayload.assemble(
+ exp_user_id, exp_methods, exp_project_id, exp_expires_at,
+ exp_audit_ids, exp_trust_id)
+
+ (user_id, methods, project_id, expires_at, audit_ids, trust_id) = (
+ token_formatters.TrustScopedPayload.disassemble(payload))
+
+ self.assertEqual(exp_user_id, user_id)
+ self.assertEqual(exp_methods, methods)
+ self.assertEqual(exp_project_id, project_id)
+ self.assertEqual(exp_expires_at, expires_at)
+ self.assertEqual(exp_audit_ids, audit_ids)
+ self.assertEqual(exp_trust_id, trust_id)
+
+ def test_unscoped_payload_with_non_uuid_user_id(self):
+ exp_user_id = 'someNonUuidUserId'
+ exp_methods = ['password']
+ exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
+ exp_audit_ids = [provider.random_urlsafe_str()]
+
+ payload = token_formatters.UnscopedPayload.assemble(
+ exp_user_id, exp_methods, exp_expires_at, exp_audit_ids)
+
+ (user_id, methods, expires_at, audit_ids) = (
+ token_formatters.UnscopedPayload.disassemble(payload))
+
+ self.assertEqual(exp_user_id, user_id)
+ self.assertEqual(exp_methods, methods)
+ self.assertEqual(exp_expires_at, expires_at)
+ self.assertEqual(exp_audit_ids, audit_ids)
+
+ def test_project_scoped_payload_with_non_uuid_user_id(self):
+ exp_user_id = 'someNonUuidUserId'
+ exp_methods = ['password']
+ exp_project_id = uuid.uuid4().hex
+ exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
+ exp_audit_ids = [provider.random_urlsafe_str()]
+
+ payload = token_formatters.ProjectScopedPayload.assemble(
+ exp_user_id, exp_methods, exp_project_id, exp_expires_at,
+ exp_audit_ids)
+
+ (user_id, methods, project_id, expires_at, audit_ids) = (
+ token_formatters.ProjectScopedPayload.disassemble(payload))
+
+ self.assertEqual(exp_user_id, user_id)
+ self.assertEqual(exp_methods, methods)
+ self.assertEqual(exp_project_id, project_id)
+ self.assertEqual(exp_expires_at, expires_at)
+ self.assertEqual(exp_audit_ids, audit_ids)
+
+ def test_project_scoped_payload_with_non_uuid_project_id(self):
+ exp_user_id = uuid.uuid4().hex
+ exp_methods = ['password']
+ exp_project_id = 'someNonUuidProjectId'
+ exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
+ exp_audit_ids = [provider.random_urlsafe_str()]
+
+ payload = token_formatters.ProjectScopedPayload.assemble(
+ exp_user_id, exp_methods, exp_project_id, exp_expires_at,
+ exp_audit_ids)
+
+ (user_id, methods, project_id, expires_at, audit_ids) = (
+ token_formatters.ProjectScopedPayload.disassemble(payload))
+
+ self.assertEqual(exp_user_id, user_id)
+ self.assertEqual(exp_methods, methods)
+ self.assertEqual(exp_project_id, project_id)
+ self.assertEqual(exp_expires_at, expires_at)
+ self.assertEqual(exp_audit_ids, audit_ids)
+
+ def test_domain_scoped_payload_with_non_uuid_user_id(self):
+ exp_user_id = 'someNonUuidUserId'
+ exp_methods = ['password']
+ exp_domain_id = uuid.uuid4().hex
+ exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
+ exp_audit_ids = [provider.random_urlsafe_str()]
+
+ payload = token_formatters.DomainScopedPayload.assemble(
+ exp_user_id, exp_methods, exp_domain_id, exp_expires_at,
+ exp_audit_ids)
+
+ (user_id, methods, domain_id, expires_at, audit_ids) = (
+ token_formatters.DomainScopedPayload.disassemble(payload))
+
+ self.assertEqual(exp_user_id, user_id)
+ self.assertEqual(exp_methods, methods)
+ self.assertEqual(exp_domain_id, domain_id)
+ self.assertEqual(exp_expires_at, expires_at)
+ self.assertEqual(exp_audit_ids, audit_ids)
+
+ def test_trust_scoped_payload_with_non_uuid_user_id(self):
+ exp_user_id = 'someNonUuidUserId'
+ exp_methods = ['password']
+ exp_project_id = uuid.uuid4().hex
+ exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
+ exp_audit_ids = [provider.random_urlsafe_str()]
+ exp_trust_id = uuid.uuid4().hex
+
+ payload = token_formatters.TrustScopedPayload.assemble(
+ exp_user_id, exp_methods, exp_project_id, exp_expires_at,
+ exp_audit_ids, exp_trust_id)
+
+ (user_id, methods, project_id, expires_at, audit_ids, trust_id) = (
+ token_formatters.TrustScopedPayload.disassemble(payload))
+
+ self.assertEqual(exp_user_id, user_id)
+ self.assertEqual(exp_methods, methods)
+ self.assertEqual(exp_project_id, project_id)
+ self.assertEqual(exp_expires_at, expires_at)
+ self.assertEqual(exp_audit_ids, audit_ids)
+ self.assertEqual(exp_trust_id, trust_id)
+
+ def test_trust_scoped_payload_with_non_uuid_project_id(self):
+ exp_user_id = uuid.uuid4().hex
+ exp_methods = ['password']
+ exp_project_id = 'someNonUuidProjectId'
+ exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
exp_audit_ids = [provider.random_urlsafe_str()]
exp_trust_id = uuid.uuid4().hex
@@ -181,3 +306,218 @@ class TestPayloads(tests.TestCase):
self.assertEqual(exp_expires_at, expires_at)
self.assertEqual(exp_audit_ids, audit_ids)
self.assertEqual(exp_trust_id, trust_id)
+
+ def test_federated_payload_with_non_uuid_ids(self):
+ exp_user_id = 'someNonUuidUserId'
+ exp_methods = ['password']
+ exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
+ exp_audit_ids = [provider.random_urlsafe_str()]
+ exp_federated_info = {'group_ids': [{'id': 'someNonUuidGroupId'}],
+ 'idp_id': uuid.uuid4().hex,
+ 'protocol_id': uuid.uuid4().hex}
+
+ payload = token_formatters.FederatedUnscopedPayload.assemble(
+ exp_user_id, exp_methods, exp_expires_at, exp_audit_ids,
+ exp_federated_info)
+
+ (user_id, methods, expires_at, audit_ids, federated_info) = (
+ token_formatters.FederatedUnscopedPayload.disassemble(payload))
+
+ self.assertEqual(exp_user_id, user_id)
+ self.assertEqual(exp_methods, methods)
+ self.assertEqual(exp_expires_at, expires_at)
+ self.assertEqual(exp_audit_ids, audit_ids)
+ self.assertEqual(exp_federated_info['group_ids'][0]['id'],
+ federated_info['group_ids'][0]['id'])
+ self.assertEqual(exp_federated_info['idp_id'],
+ federated_info['idp_id'])
+ self.assertEqual(exp_federated_info['protocol_id'],
+ federated_info['protocol_id'])
+
+ def test_federated_project_scoped_payload(self):
+ exp_user_id = 'someNonUuidUserId'
+ exp_methods = ['token']
+ exp_project_id = uuid.uuid4().hex
+ exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
+ exp_audit_ids = [provider.random_urlsafe_str()]
+ exp_federated_info = {'group_ids': [{'id': 'someNonUuidGroupId'}],
+ 'idp_id': uuid.uuid4().hex,
+ 'protocol_id': uuid.uuid4().hex}
+
+ payload = token_formatters.FederatedProjectScopedPayload.assemble(
+ exp_user_id, exp_methods, exp_project_id, exp_expires_at,
+ exp_audit_ids, exp_federated_info)
+
+ (user_id, methods, project_id, expires_at, audit_ids,
+ federated_info) = (
+ token_formatters.FederatedProjectScopedPayload.disassemble(
+ payload))
+
+ self.assertEqual(exp_user_id, user_id)
+ self.assertEqual(exp_methods, methods)
+ self.assertEqual(exp_project_id, project_id)
+ self.assertEqual(exp_expires_at, expires_at)
+ self.assertEqual(exp_audit_ids, audit_ids)
+ self.assertDictEqual(exp_federated_info, federated_info)
+
+ def test_federated_domain_scoped_payload(self):
+ exp_user_id = 'someNonUuidUserId'
+ exp_methods = ['token']
+ exp_domain_id = uuid.uuid4().hex
+ exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
+ exp_audit_ids = [provider.random_urlsafe_str()]
+ exp_federated_info = {'group_ids': [{'id': 'someNonUuidGroupId'}],
+ 'idp_id': uuid.uuid4().hex,
+ 'protocol_id': uuid.uuid4().hex}
+
+ payload = token_formatters.FederatedDomainScopedPayload.assemble(
+ exp_user_id, exp_methods, exp_domain_id, exp_expires_at,
+ exp_audit_ids, exp_federated_info)
+
+ (user_id, methods, domain_id, expires_at, audit_ids,
+ federated_info) = (
+ token_formatters.FederatedDomainScopedPayload.disassemble(
+ payload))
+
+ self.assertEqual(exp_user_id, user_id)
+ self.assertEqual(exp_methods, methods)
+ self.assertEqual(exp_domain_id, domain_id)
+ self.assertEqual(exp_expires_at, expires_at)
+ self.assertEqual(exp_audit_ids, audit_ids)
+ self.assertDictEqual(exp_federated_info, federated_info)
+
+
+class TestFernetKeyRotation(tests.TestCase):
+ def setUp(self):
+ super(TestFernetKeyRotation, self).setUp()
+
+ # A collection of all previously-seen signatures of the key
+ # repository's contents.
+ self.key_repo_signatures = set()
+
+ @property
+ def keys(self):
+ """Key files converted to numbers."""
+ return sorted(
+ int(x) for x in os.listdir(CONF.fernet_tokens.key_repository))
+
+ @property
+ def key_repository_size(self):
+ """The number of keys in the key repository."""
+ return len(self.keys)
+
+ @property
+ def key_repository_signature(self):
+ """Create a "thumbprint" of the current key repository.
+
+ Because key files are renamed, this produces a hash of the contents of
+ the key files, ignoring their filenames.
+
+ The resulting signature can be used, for example, to ensure that you
+ have a unique set of keys after you perform a key rotation (taking a
+ static set of keys, and simply shuffling them, would fail such a test).
+
+ """
+ # Load the keys into a list.
+ keys = fernet_utils.load_keys()
+
+ # Sort the list of keys by the keys themselves (they were previously
+ # sorted by filename).
+ keys.sort()
+
+ # Create the thumbprint using all keys in the repository.
+ signature = hashlib.sha1()
+ for key in keys:
+ signature.update(key)
+ return signature.hexdigest()
+
+ def assertRepositoryState(self, expected_size):
+ """Validate the state of the key repository."""
+ self.assertEqual(expected_size, self.key_repository_size)
+ self.assertUniqueRepositoryState()
+
+ def assertUniqueRepositoryState(self):
+ """Ensures that the current key repo state has not been seen before."""
+ # This is assigned to a variable because it takes some work to
+ # calculate.
+ signature = self.key_repository_signature
+
+ # Ensure the signature is not in the set of previously seen signatures.
+ self.assertNotIn(signature, self.key_repo_signatures)
+
+ # Add the signature to the set of repository signatures to validate
+ # that we don't see it again later.
+ self.key_repo_signatures.add(signature)
+
+ def test_rotation(self):
+ # Initializing a key repository results in this many keys. We don't
+ # support max_active_keys being set any lower.
+ min_active_keys = 2
+
+ # Simulate every rotation strategy up to "rotating once a week while
+ # maintaining a year's worth of keys."
+ for max_active_keys in range(min_active_keys, 52 + 1):
+ self.config_fixture.config(group='fernet_tokens',
+ max_active_keys=max_active_keys)
+
+ # Ensure that resetting the key repository always results in 2
+ # active keys.
+ self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
+
+ # Validate the initial repository state.
+ self.assertRepositoryState(expected_size=min_active_keys)
+
+ # The repository should be initialized with a staged key (0) and a
+ # primary key (1). The next key is just auto-incremented.
+ exp_keys = [0, 1]
+ next_key_number = exp_keys[-1] + 1 # keep track of next key
+ self.assertEqual(exp_keys, self.keys)
+
+ # Rotate the keys just enough times to fully populate the key
+ # repository.
+ for rotation in range(max_active_keys - min_active_keys):
+ fernet_utils.rotate_keys()
+ self.assertRepositoryState(expected_size=rotation + 3)
+
+ exp_keys.append(next_key_number)
+ next_key_number += 1
+ self.assertEqual(exp_keys, self.keys)
+
+ # We should have a fully populated key repository now.
+ self.assertEqual(max_active_keys, self.key_repository_size)
+
+ # Rotate an additional number of times to ensure that we maintain
+ # the desired number of active keys.
+ for rotation in range(10):
+ fernet_utils.rotate_keys()
+ self.assertRepositoryState(expected_size=max_active_keys)
+
+ exp_keys.pop(1)
+ exp_keys.append(next_key_number)
+ next_key_number += 1
+ self.assertEqual(exp_keys, self.keys)
+
+ def test_non_numeric_files(self):
+ self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
+ evil_file = os.path.join(CONF.fernet_tokens.key_repository, '99.bak')
+ with open(evil_file, 'w'):
+ pass
+ fernet_utils.rotate_keys()
+ self.assertTrue(os.path.isfile(evil_file))
+ keys = 0
+ for x in os.listdir(CONF.fernet_tokens.key_repository):
+ if x == '99.bak':
+ continue
+ keys += 1
+ self.assertEqual(3, keys)
+
+
+class TestLoadKeys(tests.TestCase):
+ def test_non_numeric_files(self):
+ self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
+ evil_file = os.path.join(CONF.fernet_tokens.key_repository, '~1')
+ with open(evil_file, 'w'):
+ pass
+ keys = fernet_utils.load_keys()
+ self.assertEqual(2, len(keys))
+ self.assertTrue(len(keys[0]))
diff --git a/keystone-moon/keystone/tests/unit/token/test_pki_provider.py b/keystone-moon/keystone/tests/unit/token/test_pki_provider.py
new file mode 100644
index 00000000..dad31266
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/token/test_pki_provider.py
@@ -0,0 +1,26 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.tests import unit as tests
+from keystone.token.providers import pki
+
+
+class TestPkiTokenProvider(tests.TestCase):
+ def setUp(self):
+ super(TestPkiTokenProvider, self).setUp()
+ self.provider = pki.Provider()
+
+ def test_supports_bind_authentication_returns_true(self):
+ self.assertTrue(self.provider._supports_bind_authentication)
+
+ def test_need_persistence_return_true(self):
+ self.assertIs(True, self.provider.needs_persistence())
diff --git a/keystone-moon/keystone/tests/unit/token/test_pkiz_provider.py b/keystone-moon/keystone/tests/unit/token/test_pkiz_provider.py
new file mode 100644
index 00000000..4a492bc1
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/token/test_pkiz_provider.py
@@ -0,0 +1,26 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.tests import unit as tests
+from keystone.token.providers import pkiz
+
+
+class TestPkizTokenProvider(tests.TestCase):
+ def setUp(self):
+ super(TestPkizTokenProvider, self).setUp()
+ self.provider = pkiz.Provider()
+
+ def test_supports_bind_authentication_returns_true(self):
+ self.assertTrue(self.provider._supports_bind_authentication)
+
+ def test_need_persistence_return_true(self):
+ self.assertIs(True, self.provider.needs_persistence())
diff --git a/keystone-moon/keystone/tests/unit/token/test_provider.py b/keystone-moon/keystone/tests/unit/token/test_provider.py
index e5910690..be831484 100644
--- a/keystone-moon/keystone/tests/unit/token/test_provider.py
+++ b/keystone-moon/keystone/tests/unit/token/test_provider.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import urllib
+import six
+from six.moves import urllib
from keystone.tests import unit
from keystone.token import provider
@@ -19,11 +20,11 @@ from keystone.token import provider
class TestRandomStrings(unit.BaseTestCase):
def test_strings_are_url_safe(self):
s = provider.random_urlsafe_str()
- self.assertEqual(s, urllib.quote_plus(s))
+ self.assertEqual(s, urllib.parse.quote_plus(s))
def test_strings_can_be_converted_to_bytes(self):
s = provider.random_urlsafe_str()
- self.assertTrue(isinstance(s, basestring))
+ self.assertTrue(isinstance(s, six.string_types))
b = provider.random_urlsafe_str_to_bytes(s)
self.assertTrue(isinstance(b, bytes))
diff --git a/keystone-moon/keystone/tests/unit/token/test_token_model.py b/keystone-moon/keystone/tests/unit/token/test_token_model.py
index b2474289..3959d901 100644
--- a/keystone-moon/keystone/tests/unit/token/test_token_model.py
+++ b/keystone-moon/keystone/tests/unit/token/test_token_model.py
@@ -15,7 +15,9 @@ import uuid
from oslo_config import cfg
from oslo_utils import timeutils
+from six.moves import range
+from keystone.contrib.federation import constants as federation_constants
from keystone import exception
from keystone.models import token_model
from keystone.tests.unit import core
@@ -127,7 +129,7 @@ class TestKeystoneTokenModel(core.TestCase):
self.assertIsNone(token_data.federation_protocol_id)
self.assertIsNone(token_data.federation_idp_id)
- token_data['user'][token_model.federation.FEDERATION] = federation_data
+ token_data['user'][federation_constants.FEDERATION] = federation_data
self.assertTrue(token_data.is_federated_user)
self.assertEqual([x['id'] for x in federation_data['groups']],
@@ -149,7 +151,7 @@ class TestKeystoneTokenModel(core.TestCase):
self.assertIsNone(token_data.federation_protocol_id)
self.assertIsNone(token_data.federation_idp_id)
- token_data['user'][token_model.federation.FEDERATION] = federation_data
+ token_data['user'][federation_constants.FEDERATION] = federation_data
# Federated users should not exist in V2, the data should remain empty
self.assertFalse(token_data.is_federated_user)
diff --git a/keystone-moon/keystone/tests/unit/token/test_uuid_provider.py b/keystone-moon/keystone/tests/unit/token/test_uuid_provider.py
new file mode 100644
index 00000000..b49427f0
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/token/test_uuid_provider.py
@@ -0,0 +1,26 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.tests import unit as tests
+from keystone.token.providers import uuid
+
+
+class TestUuidTokenProvider(tests.TestCase):
+ def setUp(self):
+ super(TestUuidTokenProvider, self).setUp()
+ self.provider = uuid.Provider()
+
+ def test_supports_bind_authentication_returns_true(self):
+ self.assertTrue(self.provider._supports_bind_authentication)
+
+ def test_need_persistence_return_true(self):
+ self.assertIs(True, self.provider.needs_persistence())
diff --git a/keystone-moon/keystone/token/controllers.py b/keystone-moon/keystone/token/controllers.py
index 3304acb5..ff65e733 100644
--- a/keystone-moon/keystone/token/controllers.py
+++ b/keystone-moon/keystone/token/controllers.py
@@ -15,6 +15,7 @@
import datetime
import sys
+from keystone.common import utils
from keystoneclient.common import cms
from oslo_config import cfg
from oslo_log import log
@@ -118,7 +119,8 @@ class Auth(controller.V2Controller):
# format.
user_ref = self.v3_to_v2_user(user_ref)
if tenant_ref:
- tenant_ref = self.filter_domain_id(tenant_ref)
+ tenant_ref = self.v3_to_v2_project(tenant_ref)
+
auth_token_data = self._get_auth_token_data(user_ref,
tenant_ref,
metadata_ref,
@@ -193,8 +195,9 @@ class Auth(controller.V2Controller):
if not CONF.trust.enabled and 'trust_id' in auth:
raise exception.Forbidden('Trusts are disabled.')
elif CONF.trust.enabled and 'trust_id' in auth:
- trust_ref = self.trust_api.get_trust(auth['trust_id'])
- if trust_ref is None:
+ try:
+ trust_ref = self.trust_api.get_trust(auth['trust_id'])
+ except exception.TrustNotFound:
raise exception.Forbidden()
if user_id != trust_ref['trustee_user_id']:
raise exception.Forbidden()
@@ -203,7 +206,7 @@ class Auth(controller.V2Controller):
raise exception.Forbidden()
if ('expires' in trust_ref) and (trust_ref['expires']):
expiry = trust_ref['expires']
- if expiry < timeutils.parse_isotime(timeutils.isotime()):
+ if expiry < timeutils.parse_isotime(utils.isotime()):
raise exception.Forbidden()
user_id = trust_ref['trustor_user_id']
trustor_user_ref = self.identity_api.get_user(
@@ -385,7 +388,8 @@ class Auth(controller.V2Controller):
role_list = self.assignment_api.get_roles_for_user_and_project(
user_id, tenant_id)
except exception.ProjectNotFound:
- pass
+ msg = _('Project ID not found: %(t_id)s') % {'t_id': tenant_id}
+ raise exception.Unauthorized(msg)
if not role_list:
msg = _('User %(u_id)s is unauthorized for tenant %(t_id)s')
@@ -460,7 +464,7 @@ class Auth(controller.V2Controller):
for t in tokens:
expires = t['expires']
if expires and isinstance(expires, datetime.datetime):
- t['expires'] = timeutils.isotime(expires)
+ t['expires'] = utils.isotime(expires)
data = {'revoked': tokens}
json_data = jsonutils.dumps(data)
signed_text = cms.cms_sign_text(json_data,
@@ -508,8 +512,8 @@ class Auth(controller.V2Controller):
return {}
endpoints = []
- for region_name, region_ref in six.iteritems(catalog_ref):
- for service_type, service_ref in six.iteritems(region_ref):
+ for region_name, region_ref in catalog_ref.items():
+ for service_type, service_ref in region_ref.items():
endpoints.append({
'id': service_ref.get('id'),
'name': service_ref.get('name'),
diff --git a/keystone-moon/keystone/token/persistence/__init__.py b/keystone-moon/keystone/token/persistence/__init__.py
index 29ad5653..89ec875d 100644
--- a/keystone-moon/keystone/token/persistence/__init__.py
+++ b/keystone-moon/keystone/token/persistence/__init__.py
@@ -13,4 +13,4 @@
from keystone.token.persistence.core import * # noqa
-__all__ = ['Manager', 'Driver', 'backends']
+__all__ = ['Manager', 'Driver']
diff --git a/keystone-moon/keystone/token/persistence/backends/kvs.py b/keystone-moon/keystone/token/persistence/backends/kvs.py
index b4807bf1..1bd08a31 100644
--- a/keystone-moon/keystone/token/persistence/backends/kvs.py
+++ b/keystone-moon/keystone/token/persistence/backends/kvs.py
@@ -22,6 +22,7 @@ from oslo_utils import timeutils
import six
from keystone.common import kvs
+from keystone.common import utils
from keystone import exception
from keystone.i18n import _, _LE, _LW
from keystone import token
@@ -56,10 +57,8 @@ class Token(token.persistence.Driver):
# is instantiated.
LOG.warn(_LW('It is recommended to only use the base '
'key-value-store implementation for the token driver '
- 'for testing purposes. Please use '
- 'keystone.token.persistence.backends.memcache.Token '
- 'or keystone.token.persistence.backends.sql.Token '
- 'instead.'))
+ "for testing purposes. Please use 'memcache' or "
+ "'sql' instead."))
def _prefix_token_id(self, token_id):
return 'token-%s' % token_id.encode('utf-8')
@@ -108,7 +107,7 @@ class Token(token.persistence.Driver):
# concern about the backend, always store the value(s) in the
# index as the isotime (string) version so this is where the string is
# built.
- expires_str = timeutils.isotime(data_copy['expires'], subsecond=True)
+ expires_str = utils.isotime(data_copy['expires'], subsecond=True)
self._set_key(ptk, data_copy)
user_id = data['user']['id']
@@ -207,8 +206,8 @@ class Token(token.persistence.Driver):
'revocation list.'), data['id'])
return
- revoked_token_data['expires'] = timeutils.isotime(expires,
- subsecond=True)
+ revoked_token_data['expires'] = utils.isotime(expires,
+ subsecond=True)
revoked_token_data['id'] = data['id']
token_list = self._get_key_or_default(self.revocation_key, default=[])
diff --git a/keystone-moon/keystone/token/persistence/backends/sql.py b/keystone-moon/keystone/token/persistence/backends/sql.py
index fc70fb92..08c3a216 100644
--- a/keystone-moon/keystone/token/persistence/backends/sql.py
+++ b/keystone-moon/keystone/token/persistence/backends/sql.py
@@ -127,6 +127,7 @@ class Token(token.persistence.Driver):
"""
session = sql.get_session()
+ token_list = []
with session.begin():
now = timeutils.utcnow()
query = session.query(TokenModel)
@@ -148,6 +149,9 @@ class Token(token.persistence.Driver):
continue
token_ref.valid = False
+ token_list.append(token_ref.id)
+
+ return token_list
def _tenant_matches(self, tenant_id, token_ref_dict):
return ((tenant_id is None) or
diff --git a/keystone-moon/keystone/token/persistence/core.py b/keystone-moon/keystone/token/persistence/core.py
index 19f0df35..15a58085 100644
--- a/keystone-moon/keystone/token/persistence/core.py
+++ b/keystone-moon/keystone/token/persistence/core.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Main entry point into the Token persistence service."""
+"""Main entry point into the Token Persistence service."""
import abc
import copy
@@ -27,6 +27,7 @@ from keystone.common import dependency
from keystone.common import manager
from keystone import exception
from keystone.i18n import _LW
+from keystone.token import utils
CONF = cfg.CONF
@@ -39,13 +40,15 @@ REVOCATION_MEMOIZE = cache.get_memoization_decorator(
@dependency.requires('assignment_api', 'identity_api', 'resource_api',
'token_provider_api', 'trust_api')
class PersistenceManager(manager.Manager):
- """Default pivot point for the Token backend.
+ """Default pivot point for the Token Persistence backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
+ driver_namespace = 'keystone.token.persistence'
+
def __init__(self):
super(PersistenceManager, self).__init__(CONF.token.driver)
@@ -62,7 +65,7 @@ class PersistenceManager(manager.Manager):
# context['token_id'] will in-fact be None. This also saves
# a round-trip to the backend if we don't have a token_id.
raise exception.TokenNotFound(token_id='')
- unique_id = self.token_provider_api.unique_id(token_id)
+ unique_id = utils.generate_unique_id(token_id)
token_ref = self._get_token(unique_id)
# NOTE(morganfainberg): Lift expired checking to the manager, there is
# no reason to make the drivers implement this check. With caching,
@@ -77,7 +80,7 @@ class PersistenceManager(manager.Manager):
return self.driver.get_token(token_id)
def create_token(self, token_id, data):
- unique_id = self.token_provider_api.unique_id(token_id)
+ unique_id = utils.generate_unique_id(token_id)
data_copy = copy.deepcopy(data)
data_copy['id'] = unique_id
ret = self.driver.create_token(unique_id, data_copy)
@@ -91,7 +94,7 @@ class PersistenceManager(manager.Manager):
def delete_token(self, token_id):
if not CONF.token.revoke_by_id:
return
- unique_id = self.token_provider_api.unique_id(token_id)
+ unique_id = utils.generate_unique_id(token_id)
self.driver.delete_token(unique_id)
self._invalidate_individual_token_cache(unique_id)
self.invalidate_revocation_list()
@@ -100,11 +103,10 @@ class PersistenceManager(manager.Manager):
consumer_id=None):
if not CONF.token.revoke_by_id:
return
- token_list = self.driver._list_tokens(user_id, tenant_id, trust_id,
- consumer_id)
- self.driver.delete_tokens(user_id, tenant_id, trust_id, consumer_id)
+ token_list = self.driver.delete_tokens(user_id, tenant_id, trust_id,
+ consumer_id)
for token_id in token_list:
- unique_id = self.token_provider_api.unique_id(token_id)
+ unique_id = utils.generate_unique_id(token_id)
self._invalidate_individual_token_cache(unique_id)
self.invalidate_revocation_list()
@@ -196,11 +198,6 @@ class PersistenceManager(manager.Manager):
self.token_provider_api.invalidate_individual_token_cache(token_id)
-# NOTE(morganfainberg): @dependency.optional() is required here to ensure the
-# class-level optional dependency control attribute is populated as empty
-# this is because of the override of .__getattr__ and ensures that if the
-# optional dependency injector changes attributes, this class doesn't break.
-@dependency.optional()
@dependency.requires('token_provider_api')
@dependency.provider('token_api')
class Manager(object):
@@ -306,7 +303,7 @@ class Driver(object):
:type trust_id: string
:param consumer_id: identity of the consumer
:type consumer_id: string
- :returns: None.
+ :returns: The tokens that have been deleted.
:raises: keystone.exception.TokenNotFound
"""
@@ -322,6 +319,7 @@ class Driver(object):
self.delete_token(token)
except exception.NotFound:
pass
+ return token_list
@abc.abstractmethod
def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
diff --git a/keystone-moon/keystone/token/provider.py b/keystone-moon/keystone/token/provider.py
index fb41d4bb..1422e41f 100644
--- a/keystone-moon/keystone/token/provider.py
+++ b/keystone-moon/keystone/token/provider.py
@@ -20,7 +20,6 @@ import datetime
import sys
import uuid
-from keystoneclient.common import cms
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
@@ -34,6 +33,7 @@ from keystone.i18n import _, _LE
from keystone.models import token_model
from keystone import notifications
from keystone.token import persistence
+from keystone.token import utils
CONF = cfg.CONF
@@ -110,6 +110,8 @@ class Manager(manager.Manager):
"""
+ driver_namespace = 'keystone.token.provider'
+
V2 = V2
V3 = V3
VERSIONS = VERSIONS
@@ -145,7 +147,7 @@ class Manager(manager.Manager):
]
}
- for event, cb_info in six.iteritems(callbacks):
+ for event, cb_info in callbacks.items():
for resource_type, callback_fns in cb_info:
notifications.register_event_callback(event, resource_type,
callback_fns)
@@ -164,18 +166,6 @@ class Manager(manager.Manager):
self._persistence_manager = persistence.PersistenceManager()
return self._persistence_manager
- def unique_id(self, token_id):
- """Return a unique ID for a token.
-
- The returned value is useful as the primary key of a database table,
- memcache store, or other lookup table.
-
- :returns: Given a PKI token, returns it's hashed value. Otherwise,
- returns the passed-in value (such as a UUID token ID or an
- existing hash).
- """
- return cms.cms_hash_token(token_id, mode=CONF.token.hash_algorithm)
-
def _create_token(self, token_id, token_data):
try:
if isinstance(token_data['expires'], six.string_types):
@@ -192,7 +182,7 @@ class Manager(manager.Manager):
six.reraise(*exc_info)
def validate_token(self, token_id, belongs_to=None):
- unique_id = self.unique_id(token_id)
+ unique_id = utils.generate_unique_id(token_id)
# NOTE(morganfainberg): Ensure we never use the long-form token_id
# (PKI) as part of the cache_key.
token = self._validate_token(unique_id)
@@ -211,7 +201,7 @@ class Manager(manager.Manager):
self.revoke_api.check_token(token_values)
def validate_v2_token(self, token_id, belongs_to=None):
- unique_id = self.unique_id(token_id)
+ unique_id = utils.generate_unique_id(token_id)
if self._needs_persistence:
# NOTE(morganfainberg): Ensure we never use the long-form token_id
# (PKI) as part of the cache_key.
@@ -219,6 +209,7 @@ class Manager(manager.Manager):
else:
token_ref = token_id
token = self._validate_v2_token(token_ref)
+ token['access']['token']['id'] = token_id
self._token_belongs_to(token, belongs_to)
self._is_valid_token(token)
return token
@@ -239,7 +230,7 @@ class Manager(manager.Manager):
return self.check_revocation_v3(token)
def validate_v3_token(self, token_id):
- unique_id = self.unique_id(token_id)
+ unique_id = utils.generate_unique_id(token_id)
# NOTE(lbragstad): Only go to persistent storage if we have a token to
# fetch from the backend. If the Fernet token provider is being used
# this step isn't necessary. The Fernet token reference is persisted in
diff --git a/keystone-moon/keystone/token/providers/common.py b/keystone-moon/keystone/token/providers/common.py
index 717e1495..b71458cd 100644
--- a/keystone-moon/keystone/token/providers/common.py
+++ b/keystone-moon/keystone/token/providers/common.py
@@ -14,17 +14,17 @@
from oslo_config import cfg
from oslo_log import log
+from oslo_log import versionutils
from oslo_serialization import jsonutils
-from oslo_utils import timeutils
import six
from six.moves.urllib import parse
from keystone.common import controller as common_controller
from keystone.common import dependency
-from keystone.contrib import federation
+from keystone.common import utils
+from keystone.contrib.federation import constants as federation_constants
from keystone import exception
from keystone.i18n import _, _LE
-from keystone.openstack.common import versionutils
from keystone import token
from keystone.token import provider
@@ -37,18 +37,33 @@ CONF = cfg.CONF
class V2TokenDataHelper(object):
"""Creates V2 token data."""
- def v3_to_v2_token(self, token_id, v3_token_data):
+ def v3_to_v2_token(self, v3_token_data):
token_data = {}
# Build v2 token
v3_token = v3_token_data['token']
token = {}
- token['id'] = token_id
token['expires'] = v3_token.get('expires_at')
token['issued_at'] = v3_token.get('issued_at')
token['audit_ids'] = v3_token.get('audit_ids')
+ # Bail immediately if this is a domain-scoped token, which is not
+ # supported by the v2 API at all.
+ if 'domain' in v3_token:
+ raise exception.Unauthorized(_(
+ 'Domains are not supported by the v2 API. Please use the v3 '
+ 'API instead.'))
+
+ # Bail if this is a project-scoped token outside the default domain,
+ # which may result in a namespace collision with a project inside the
+ # default domain.
if 'project' in v3_token:
+ if (v3_token['project']['domain']['id'] !=
+ CONF.identity.default_domain_id):
+ raise exception.Unauthorized(_(
+ 'Project not found in the default domain (please use the '
+ 'v3 API instead): %s') % v3_token['project']['id'])
+
# v3 token_data does not contain all tenant attributes
tenant = self.resource_api.get_project(
v3_token['project']['id'])
@@ -58,14 +73,32 @@ class V2TokenDataHelper(object):
# Build v2 user
v3_user = v3_token['user']
+
+ # Bail if this is a token outside the default domain,
+ # which may result in a namespace collision with a project inside the
+ # default domain.
+ if ('domain' in v3_user and v3_user['domain']['id'] !=
+ CONF.identity.default_domain_id):
+ raise exception.Unauthorized(_(
+ 'User not found in the default domain (please use the v3 API '
+ 'instead): %s') % v3_user['id'])
+
user = common_controller.V2Controller.v3_to_v2_user(v3_user)
+ # Maintain Trust Data
+ if 'OS-TRUST:trust' in v3_token:
+ v3_trust_data = v3_token['OS-TRUST:trust']
+ token_data['trust'] = {
+ 'trustee_user_id': v3_trust_data['trustee_user']['id'],
+ 'id': v3_trust_data['id'],
+ 'trustor_user_id': v3_trust_data['trustor_user']['id'],
+ 'impersonation': v3_trust_data['impersonation']
+ }
+
# Set user roles
user['roles'] = []
role_ids = []
for role in v3_token.get('roles', []):
- # Filter role id since it's not included in v2 token response
- role_ids.append(role.pop('id'))
user['roles'].append(role)
user['roles_links'] = []
@@ -99,7 +132,7 @@ class V2TokenDataHelper(object):
expires = token_ref.get('expires', provider.default_expire_time())
if expires is not None:
if not isinstance(expires, six.text_type):
- expires = timeutils.isotime(expires)
+ expires = utils.isotime(expires)
token_data = token_ref.get('token_data')
if token_data:
@@ -112,7 +145,7 @@ class V2TokenDataHelper(object):
o = {'access': {'token': {'id': token_ref['id'],
'expires': expires,
- 'issued_at': timeutils.strtime(),
+ 'issued_at': utils.strtime(),
'audit_ids': audit_info
},
'user': {'id': user_ref['id'],
@@ -181,8 +214,8 @@ class V2TokenDataHelper(object):
return []
services = {}
- for region, region_ref in six.iteritems(catalog_ref):
- for service, service_ref in six.iteritems(region_ref):
+ for region, region_ref in catalog_ref.items():
+ for service, service_ref in region_ref.items():
new_service_ref = services.get(service, {})
new_service_ref['name'] = service_ref.pop('name')
new_service_ref['type'] = service
@@ -195,7 +228,7 @@ class V2TokenDataHelper(object):
new_service_ref['endpoints'] = endpoints_ref
services[service] = new_service_ref
- return services.values()
+ return list(services.values())
@dependency.requires('assignment_api', 'catalog_api', 'federation_api',
@@ -239,10 +272,26 @@ class V3TokenDataHelper(object):
user_id, project_id)
return [self.role_api.get_role(role_id) for role_id in roles]
- def _populate_roles_for_groups(self, group_ids,
- project_id=None, domain_id=None,
- user_id=None):
- def _check_roles(roles, user_id, project_id, domain_id):
+ def populate_roles_for_groups(self, token_data, group_ids,
+ project_id=None, domain_id=None,
+ user_id=None):
+ """Populate roles based on provided groups and project/domain
+
+ Used for ephemeral users with dynamically assigned groups.
+ This method does not return anything, yet it modifies token_data in
+ place.
+
+ :param token_data: a dictionary used for building token response
+ :group_ids: list of group IDs a user is a member of
+ :project_id: project ID to scope to
+ :domain_id: domain ID to scope to
+ :user_id: user ID
+
+ :raises: exception.Unauthorized - when no roles were found for a
+ (group_ids, project_id) or (group_ids, domain_id) pair.
+
+ """
+ def check_roles(roles, user_id, project_id, domain_id):
# User was granted roles so simply exit this function.
if roles:
return
@@ -264,8 +313,8 @@ class V3TokenDataHelper(object):
roles = self.assignment_api.get_roles_for_groups(group_ids,
project_id,
domain_id)
- _check_roles(roles, user_id, project_id, domain_id)
- return roles
+ check_roles(roles, user_id, project_id, domain_id)
+ token_data['roles'] = roles
def _populate_user(self, token_data, user_id, trust):
if 'user' in token_data:
@@ -393,10 +442,10 @@ class V3TokenDataHelper(object):
if not expires:
expires = provider.default_expire_time()
if not isinstance(expires, six.string_types):
- expires = timeutils.isotime(expires, subsecond=True)
+ expires = utils.isotime(expires, subsecond=True)
token_data['expires_at'] = expires
token_data['issued_at'] = (issued_at or
- timeutils.isotime(subsecond=True))
+ utils.isotime(subsecond=True))
def _populate_audit_info(self, token_data, audit_info=None):
if audit_info is None or isinstance(audit_info, six.string_types):
@@ -420,7 +469,7 @@ class V3TokenDataHelper(object):
versionutils.deprecated(
what='passing token data with "extras"',
as_of=versionutils.deprecated.KILO,
- in_favor_of='well-defined APIs')
+ in_favor_of='well-defined APIs')(lambda: None)()
token_data = {'methods': method_names,
'extras': extras}
@@ -490,13 +539,21 @@ class BaseProvider(provider.Provider):
return token_id, token_data
def _is_mapped_token(self, auth_context):
- return (federation.IDENTITY_PROVIDER in auth_context and
- federation.PROTOCOL in auth_context)
+ return (federation_constants.IDENTITY_PROVIDER in auth_context and
+ federation_constants.PROTOCOL in auth_context)
def issue_v3_token(self, user_id, method_names, expires_at=None,
project_id=None, domain_id=None, auth_context=None,
trust=None, metadata_ref=None, include_catalog=True,
parent_audit_id=None):
+ if auth_context and auth_context.get('bind'):
+ # NOTE(lbragstad): Check if the token provider being used actually
+ # supports bind authentication methods before proceeding.
+ if not self._supports_bind_authentication:
+ raise exception.NotImplemented(_(
+ 'The configured token provider does not support bind '
+ 'authentication.'))
+
# for V2, trust is stashed in metadata_ref
if (CONF.trust.enabled and not trust and metadata_ref and
'trust_id' in metadata_ref):
@@ -530,38 +587,30 @@ class BaseProvider(provider.Provider):
return token_id, token_data
def _handle_mapped_tokens(self, auth_context, project_id, domain_id):
- def get_federated_domain():
- return (CONF.federation.federated_domain_name or
- federation.FEDERATED_DOMAIN_KEYWORD)
-
- federated_domain = get_federated_domain()
user_id = auth_context['user_id']
group_ids = auth_context['group_ids']
- idp = auth_context[federation.IDENTITY_PROVIDER]
- protocol = auth_context[federation.PROTOCOL]
+ idp = auth_context[federation_constants.IDENTITY_PROVIDER]
+ protocol = auth_context[federation_constants.PROTOCOL]
token_data = {
'user': {
'id': user_id,
'name': parse.unquote(user_id),
- federation.FEDERATION: {
+ federation_constants.FEDERATION: {
+ 'groups': [{'id': x} for x in group_ids],
'identity_provider': {'id': idp},
'protocol': {'id': protocol}
},
'domain': {
- 'id': federated_domain,
- 'name': federated_domain
+ 'id': CONF.federation.federated_domain_name,
+ 'name': CONF.federation.federated_domain_name
}
}
}
if project_id or domain_id:
- roles = self.v3_token_data_helper._populate_roles_for_groups(
- group_ids, project_id, domain_id, user_id)
- token_data.update({'roles': roles})
- else:
- token_data['user'][federation.FEDERATION].update({
- 'groups': [{'id': x} for x in group_ids]
- })
+ self.v3_token_data_helper.populate_roles_for_groups(
+ token_data, group_ids, project_id, domain_id, user_id)
+
return token_data
def _verify_token_ref(self, token_ref):
@@ -637,30 +686,10 @@ class BaseProvider(provider.Provider):
# management layer is now pluggable, one can always provide
# their own implementation to suit their needs.
token_data = token_ref.get('token_data')
- if (not token_data or
- self.get_token_version(token_data) !=
- token.provider.V2):
- # token is created by old v2 logic
- metadata_ref = token_ref['metadata']
- roles_ref = []
- for role_id in metadata_ref.get('roles', []):
- roles_ref.append(self.role_api.get_role(role_id))
-
- # Get a service catalog if possible
- # This is needed for on-behalf-of requests
- catalog_ref = None
- if token_ref.get('tenant'):
- catalog_ref = self.catalog_api.get_catalog(
- token_ref['user']['id'],
- token_ref['tenant']['id'])
-
- trust_ref = None
- if CONF.trust.enabled and 'trust_id' in metadata_ref:
- trust_ref = self.trust_api.get_trust(
- metadata_ref['trust_id'])
-
- token_data = self.v2_token_data_helper.format_token(
- token_ref, roles_ref, catalog_ref, trust_ref)
+ if (self.get_token_version(token_data) != token.provider.V2):
+ # Validate the V3 token as V2
+ token_data = self.v2_token_data_helper.v3_to_v2_token(
+ token_data)
trust_id = token_data['access'].get('trust', {}).get('id')
if trust_id:
diff --git a/keystone-moon/keystone/token/providers/fernet/core.py b/keystone-moon/keystone/token/providers/fernet/core.py
index b1da263b..1bbacb03 100644
--- a/keystone-moon/keystone/token/providers/fernet/core.py
+++ b/keystone-moon/keystone/token/providers/fernet/core.py
@@ -14,7 +14,8 @@ from oslo_config import cfg
from oslo_log import log
from keystone.common import dependency
-from keystone.contrib import federation
+from keystone.common import utils as ks_utils
+from keystone.contrib.federation import constants as federation_constants
from keystone import exception
from keystone.i18n import _
from keystone.token import provider
@@ -59,6 +60,9 @@ class Provider(common.BaseProvider):
if token_ref.get('tenant'):
project_id = token_ref['tenant']['id']
+ # maintain expiration time across rescopes
+ expires = token_ref.get('expires')
+
parent_audit_id = token_ref.get('parent_audit_id')
# If parent_audit_id is defined then a token authentication was made
if parent_audit_id:
@@ -80,136 +84,132 @@ class Provider(common.BaseProvider):
project_id=project_id,
token=token_ref,
include_catalog=False,
- audit_info=audit_ids)
+ audit_info=audit_ids,
+ expires=expires)
expires_at = v3_token_data['token']['expires_at']
token_id = self.token_formatter.create_token(user_id, expires_at,
audit_ids,
methods=method_names,
project_id=project_id)
+ self._build_issued_at_info(token_id, v3_token_data)
# Convert v3 to v2 token data and build v2 catalog
- token_data = self.v2_token_data_helper.v3_to_v2_token(token_id,
- v3_token_data)
+ token_data = self.v2_token_data_helper.v3_to_v2_token(v3_token_data)
+ token_data['access']['token']['id'] = token_id
+
+ return token_id, token_data
+ def issue_v3_token(self, *args, **kwargs):
+ token_id, token_data = super(Provider, self).issue_v3_token(
+ *args, **kwargs)
+ self._build_issued_at_info(token_id, token_data)
return token_id, token_data
+ def _build_issued_at_info(self, token_id, token_data):
+ # NOTE(roxanaghe, lbragstad): We must use the creation time that
+ # Fernet builds into its token. The Fernet spec details that the
+ # token creation time is built into the token, outside of the payload
+ # provided by Keystone. This is the reason why we don't pass the
+ # issued_at time in the payload. This also means that we shouldn't
+ # return a token reference with a creation time that we created
+ # when Fernet uses a different creation time. We should use the
+ # creation time provided by Fernet because it's the creation time
+ # that we have to rely on when we validate the token.
+ fernet_creation_datetime_obj = self.token_formatter.creation_time(
+ token_id)
+ token_data['token']['issued_at'] = ks_utils.isotime(
+ at=fernet_creation_datetime_obj, subsecond=True)
+
def _build_federated_info(self, token_data):
"""Extract everything needed for federated tokens.
- This dictionary is passed to the FederatedPayload token formatter,
- which unpacks the values and builds the Fernet token.
+ This dictionary is passed to federated token formatters, which unpack
+ the values and build federated Fernet tokens.
"""
- group_ids = token_data.get('user', {}).get(
- federation.FEDERATION, {}).get('groups')
- idp_id = token_data.get('user', {}).get(
- federation.FEDERATION, {}).get('identity_provider', {}).get('id')
- protocol_id = token_data.get('user', {}).get(
- federation.FEDERATION, {}).get('protocol', {}).get('id')
- if not group_ids:
- group_ids = list()
- federated_dict = dict(group_ids=group_ids, idp_id=idp_id,
- protocol_id=protocol_id)
- return federated_dict
+ idp_id = token_data['token'].get('user', {}).get(
+ federation_constants.FEDERATION, {}).get(
+ 'identity_provider', {}).get('id')
+ protocol_id = token_data['token'].get('user', {}).get(
+ federation_constants.FEDERATION, {}).get('protocol', {}).get('id')
+ # If we don't have an identity provider ID and a protocol ID, it's safe
+ # to assume we aren't dealing with a federated token.
+ if not (idp_id and protocol_id):
+ return None
+
+ group_ids = token_data['token'].get('user', {}).get(
+ federation_constants.FEDERATION, {}).get('groups')
+
+ return {'group_ids': group_ids,
+ 'idp_id': idp_id,
+ 'protocol_id': protocol_id}
def _rebuild_federated_info(self, federated_dict, user_id):
"""Format federated information into the token reference.
- The federated_dict is passed back from the FederatedPayload token
- formatter. The responsibility of this method is to format the
- information passed back from the token formatter into the token
- reference before constructing the token data from the
- V3TokenDataHelper.
+ The federated_dict is passed back from the federated token formatters.
+ The responsibility of this method is to format the information passed
+ back from the token formatter into the token reference before
+ constructing the token data from the V3TokenDataHelper.
"""
g_ids = federated_dict['group_ids']
idp_id = federated_dict['idp_id']
protocol_id = federated_dict['protocol_id']
- federated_info = dict(groups=g_ids,
- identity_provider=dict(id=idp_id),
- protocol=dict(id=protocol_id))
- token_dict = {'user': {federation.FEDERATION: federated_info}}
- token_dict['user']['id'] = user_id
- token_dict['user']['name'] = user_id
+
+ federated_info = {
+ 'groups': g_ids,
+ 'identity_provider': {'id': idp_id},
+ 'protocol': {'id': protocol_id}
+ }
+
+ token_dict = {
+ 'user': {
+ federation_constants.FEDERATION: federated_info,
+ 'id': user_id,
+ 'name': user_id
+ }
+ }
+
return token_dict
- def issue_v3_token(self, user_id, method_names, expires_at=None,
- project_id=None, domain_id=None, auth_context=None,
- trust=None, metadata_ref=None, include_catalog=True,
- parent_audit_id=None):
- """Issue a V3 formatted token.
-
- Here is where we need to detect what is given to us, and what kind of
- token the user is expecting. Depending on the outcome of that, we can
- pass all the information to be packed to the proper token format
- handler.
-
- :param user_id: ID of the user
- :param method_names: method of authentication
- :param expires_at: token expiration time
- :param project_id: ID of the project being scoped to
- :param domain_id: ID of the domain being scoped to
- :param auth_context: authentication context
- :param trust: ID of the trust
- :param metadata_ref: metadata reference
- :param include_catalog: return the catalog in the response if True,
- otherwise don't return the catalog
- :param parent_audit_id: ID of the patent audit entity
- :returns: tuple containing the id of the token and the token data
+ def _rebuild_federated_token_roles(self, token_dict, federated_dict,
+ user_id, project_id, domain_id):
+ """Populate roles based on (groups, project/domain) pair.
- """
- # TODO(lbragstad): Currently, Fernet tokens don't support bind in the
- # token format. Raise a 501 if we're dealing with bind.
- if auth_context.get('bind'):
- raise exception.NotImplemented()
+ We must populate roles from (groups, project/domain) as ephemeral users
+ don't exist in the backend. Upon success, a ``roles`` key will be added
+ to ``token_dict``.
- token_ref = None
- # NOTE(lbragstad): This determines if we are dealing with a federated
- # token or not. The groups for the user will be in the returned token
- # reference.
- federated_dict = None
- if auth_context and self._is_mapped_token(auth_context):
- token_ref = self._handle_mapped_tokens(
- auth_context, project_id, domain_id)
- federated_dict = self._build_federated_info(token_ref)
-
- token_data = self.v3_token_data_helper.get_token_data(
- user_id,
- method_names,
- auth_context.get('extras') if auth_context else None,
- domain_id=domain_id,
- project_id=project_id,
- expires=expires_at,
- trust=trust,
- bind=auth_context.get('bind') if auth_context else None,
- token=token_ref,
- include_catalog=include_catalog,
- audit_info=parent_audit_id)
+ :param token_dict: dictionary with data used for building token
+ :param federated_dict: federated information such as identity provider
+ protocol and set of group IDs
+ :param user_id: user ID
+ :param project_id: project ID the token is being scoped to
+ :param domain_id: domain ID the token is being scoped to
- token = self.token_formatter.create_token(
- user_id,
- token_data['token']['expires_at'],
- token_data['token']['audit_ids'],
- methods=method_names,
- domain_id=domain_id,
- project_id=project_id,
- trust_id=token_data['token'].get('OS-TRUST:trust', {}).get('id'),
- federated_info=federated_dict)
- return token, token_data
+ """
+ group_ids = [x['id'] for x in federated_dict['group_ids']]
+ self.v3_token_data_helper.populate_roles_for_groups(
+ token_dict, group_ids, project_id, domain_id, user_id)
def validate_v2_token(self, token_ref):
"""Validate a V2 formatted token.
:param token_ref: reference describing the token to validate
:returns: the token data
+ :raises keystone.exception.TokenNotFound: if token format is invalid
:raises keystone.exception.Unauthorized: if v3 token is used
"""
- (user_id, methods,
- audit_ids, domain_id,
- project_id, trust_id,
- federated_info, created_at,
- expires_at) = self.token_formatter.validate_token(token_ref)
+ try:
+ (user_id, methods,
+ audit_ids, domain_id,
+ project_id, trust_id,
+ federated_info, created_at,
+ expires_at) = self.token_formatter.validate_token(token_ref)
+ except exception.ValidationError as e:
+ raise exception.TokenNotFound(e)
if trust_id or domain_id or federated_info:
msg = _('This is not a v2.0 Fernet token. Use v3 for trust, '
@@ -225,26 +225,36 @@ class Provider(common.BaseProvider):
token=token_ref,
include_catalog=False,
audit_info=audit_ids)
- return self.v2_token_data_helper.v3_to_v2_token(token_ref,
- v3_token_data)
+ token_data = self.v2_token_data_helper.v3_to_v2_token(v3_token_data)
+ token_data['access']['token']['id'] = token_ref
+ return token_data
def validate_v3_token(self, token):
"""Validate a V3 formatted token.
:param token: a string describing the token to validate
:returns: the token data
- :raises keystone.exception.Unauthorized: if token format version isn't
+ :raises keystone.exception.TokenNotFound: if token format version isn't
supported
"""
- (user_id, methods, audit_ids, domain_id, project_id, trust_id,
- federated_info, created_at, expires_at) = (
- self.token_formatter.validate_token(token))
+ try:
+ (user_id, methods, audit_ids, domain_id, project_id, trust_id,
+ federated_info, created_at, expires_at) = (
+ self.token_formatter.validate_token(token))
+ except exception.ValidationError as e:
+ raise exception.TokenNotFound(e)
token_dict = None
+ trust_ref = None
if federated_info:
token_dict = self._rebuild_federated_info(federated_info, user_id)
- trust_ref = self.trust_api.get_trust(trust_id)
+ if project_id or domain_id:
+ self._rebuild_federated_token_roles(token_dict, federated_info,
+ user_id, project_id,
+ domain_id)
+ if trust_id:
+ trust_ref = self.trust_api.get_trust(trust_id)
return self.v3_token_data_helper.get_token_data(
user_id,
@@ -264,4 +274,21 @@ class Provider(common.BaseProvider):
:type token_data: dict
:raises keystone.exception.NotImplemented: when called
"""
- raise exception.NotImplemented()
+ return self.token_formatter.create_token(
+ token_data['token']['user']['id'],
+ token_data['token']['expires_at'],
+ token_data['token']['audit_ids'],
+ methods=token_data['token'].get('methods'),
+ domain_id=token_data['token'].get('domain', {}).get('id'),
+ project_id=token_data['token'].get('project', {}).get('id'),
+ trust_id=token_data['token'].get('OS-TRUST:trust', {}).get('id'),
+ federated_info=self._build_federated_info(token_data)
+ )
+
+ @property
+ def _supports_bind_authentication(self):
+ """Return if the token provider supports bind authentication methods.
+
+ :returns: False
+ """
+ return False
diff --git a/keystone-moon/keystone/token/providers/fernet/token_formatters.py b/keystone-moon/keystone/token/providers/fernet/token_formatters.py
index 50960923..d1dbb08c 100644
--- a/keystone-moon/keystone/token/providers/fernet/token_formatters.py
+++ b/keystone-moon/keystone/token/providers/fernet/token_formatters.py
@@ -21,11 +21,12 @@ from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import six
-from six.moves import urllib
+from six.moves import map, urllib
from keystone.auth import plugins as auth_plugins
+from keystone.common import utils as ks_utils
from keystone import exception
-from keystone.i18n import _
+from keystone.i18n import _, _LI
from keystone.token import provider
from keystone.token.providers.fernet import utils
@@ -60,7 +61,7 @@ class TokenFormatter(object):
if not keys:
raise exception.KeysNotFound()
- fernet_instances = [fernet.Fernet(key) for key in utils.load_keys()]
+ fernet_instances = [fernet.Fernet(key) for key in keys]
return fernet.MultiFernet(fernet_instances)
def pack(self, payload):
@@ -74,8 +75,9 @@ class TokenFormatter(object):
try:
return self.crypto.decrypt(token)
- except fernet.InvalidToken as e:
- raise exception.Unauthorized(six.text_type(e))
+ except fernet.InvalidToken:
+ raise exception.ValidationError(
+ _('This is not a recognized Fernet token'))
@classmethod
def creation_time(cls, fernet_token):
@@ -115,9 +117,27 @@ class TokenFormatter(object):
expires_at,
audit_ids,
trust_id)
+ elif project_id and federated_info:
+ version = FederatedProjectScopedPayload.version
+ payload = FederatedProjectScopedPayload.assemble(
+ user_id,
+ methods,
+ project_id,
+ expires_at,
+ audit_ids,
+ federated_info)
+ elif domain_id and federated_info:
+ version = FederatedDomainScopedPayload.version
+ payload = FederatedDomainScopedPayload.assemble(
+ user_id,
+ methods,
+ domain_id,
+ expires_at,
+ audit_ids,
+ federated_info)
elif federated_info:
- version = FederatedPayload.version
- payload = FederatedPayload.assemble(
+ version = FederatedUnscopedPayload.version
+ payload = FederatedUnscopedPayload.assemble(
user_id,
methods,
expires_at,
@@ -151,6 +171,17 @@ class TokenFormatter(object):
serialized_payload = msgpack.packb(versioned_payload)
token = self.pack(serialized_payload)
+ # NOTE(lbragstad): We should warn against Fernet tokens that are over
+ # 255 characters in length. This is mostly due to persisting the tokens
+ # in a backend store of some kind that might have a limit of 255
+ # characters. Even though Keystone isn't storing a Fernet token
+ # anywhere, we can't say it isn't being stored somewhere else with
+ # those kind of backend constraints.
+ if len(token) > 255:
+ LOG.info(_LI('Fernet token created with length of %d '
+ 'characters, which exceeds 255 characters'),
+ len(token))
+
return token
def validate_token(self, token):
@@ -181,21 +212,29 @@ class TokenFormatter(object):
elif version == TrustScopedPayload.version:
(user_id, methods, project_id, expires_at, audit_ids, trust_id) = (
TrustScopedPayload.disassemble(payload))
- elif version == FederatedPayload.version:
+ elif version == FederatedUnscopedPayload.version:
(user_id, methods, expires_at, audit_ids, federated_info) = (
- FederatedPayload.disassemble(payload))
+ FederatedUnscopedPayload.disassemble(payload))
+ elif version == FederatedProjectScopedPayload.version:
+ (user_id, methods, project_id, expires_at, audit_ids,
+ federated_info) = FederatedProjectScopedPayload.disassemble(
+ payload)
+ elif version == FederatedDomainScopedPayload.version:
+ (user_id, methods, domain_id, expires_at, audit_ids,
+ federated_info) = FederatedDomainScopedPayload.disassemble(
+ payload)
else:
- # If the token_format is not recognized, raise Unauthorized.
- raise exception.Unauthorized(_(
+ # If the token_format is not recognized, raise ValidationError.
+ raise exception.ValidationError(_(
'This is not a recognized Fernet payload version: %s') %
version)
# rather than appearing in the payload, the creation time is encoded
# into the token format itself
created_at = TokenFormatter.creation_time(token)
- created_at = timeutils.isotime(at=created_at, subsecond=True)
+ created_at = ks_utils.isotime(at=created_at, subsecond=True)
expires_at = timeutils.parse_isotime(expires_at)
- expires_at = timeutils.isotime(at=expires_at, subsecond=True)
+ expires_at = ks_utils.isotime(at=expires_at, subsecond=True)
return (user_id, methods, audit_ids, domain_id, project_id, trust_id,
federated_info, created_at, expires_at)
@@ -273,8 +312,8 @@ class BasePayload(object):
:returns: a time formatted strings
"""
- time_object = datetime.datetime.utcfromtimestamp(int(time_int))
- return timeutils.isotime(time_object)
+ time_object = datetime.datetime.utcfromtimestamp(time_int)
+ return ks_utils.isotime(time_object, subsecond=True)
@classmethod
def attempt_convert_uuid_hex_to_bytes(cls, value):
@@ -319,7 +358,7 @@ class UnscopedPayload(BasePayload):
:returns: the payload of an unscoped token
"""
- b_user_id = cls.convert_uuid_hex_to_bytes(user_id)
+ b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
expires_at_int = cls._convert_time_string_to_int(expires_at)
b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
@@ -335,7 +374,7 @@ class UnscopedPayload(BasePayload):
audit_ids
"""
- user_id = cls.convert_uuid_bytes_to_hex(payload[0])
+ user_id = cls.attempt_convert_uuid_bytes_to_hex(payload[0])
methods = auth_plugins.convert_integer_to_method_list(payload[1])
expires_at_str = cls._convert_int_to_time_string(payload[2])
audit_ids = list(map(provider.base64_encode, payload[3]))
@@ -357,7 +396,7 @@ class DomainScopedPayload(BasePayload):
:returns: the payload of a domain-scoped token
"""
- b_user_id = cls.convert_uuid_hex_to_bytes(user_id)
+ b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
try:
b_domain_id = cls.convert_uuid_hex_to_bytes(domain_id)
@@ -381,7 +420,7 @@ class DomainScopedPayload(BasePayload):
expires_at_str, and audit_ids
"""
- user_id = cls.convert_uuid_bytes_to_hex(payload[0])
+ user_id = cls.attempt_convert_uuid_bytes_to_hex(payload[0])
methods = auth_plugins.convert_integer_to_method_list(payload[1])
try:
domain_id = cls.convert_uuid_bytes_to_hex(payload[2])
@@ -412,9 +451,9 @@ class ProjectScopedPayload(BasePayload):
:returns: the payload of a project-scoped token
"""
- b_user_id = cls.convert_uuid_hex_to_bytes(user_id)
+ b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
- b_project_id = cls.convert_uuid_hex_to_bytes(project_id)
+ b_project_id = cls.attempt_convert_uuid_hex_to_bytes(project_id)
expires_at_int = cls._convert_time_string_to_int(expires_at)
b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
audit_ids))
@@ -429,9 +468,9 @@ class ProjectScopedPayload(BasePayload):
expires_at_str, and audit_ids
"""
- user_id = cls.convert_uuid_bytes_to_hex(payload[0])
+ user_id = cls.attempt_convert_uuid_bytes_to_hex(payload[0])
methods = auth_plugins.convert_integer_to_method_list(payload[1])
- project_id = cls.convert_uuid_bytes_to_hex(payload[2])
+ project_id = cls.attempt_convert_uuid_bytes_to_hex(payload[2])
expires_at_str = cls._convert_int_to_time_string(payload[3])
audit_ids = list(map(provider.base64_encode, payload[4]))
@@ -455,9 +494,9 @@ class TrustScopedPayload(BasePayload):
:returns: the payload of a trust-scoped token
"""
- b_user_id = cls.convert_uuid_hex_to_bytes(user_id)
+ b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
- b_project_id = cls.convert_uuid_hex_to_bytes(project_id)
+ b_project_id = cls.attempt_convert_uuid_hex_to_bytes(project_id)
b_trust_id = cls.convert_uuid_hex_to_bytes(trust_id)
expires_at_int = cls._convert_time_string_to_int(expires_at)
b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
@@ -475,9 +514,9 @@ class TrustScopedPayload(BasePayload):
expires_at_str, audit_ids, and trust_id
"""
- user_id = cls.convert_uuid_bytes_to_hex(payload[0])
+ user_id = cls.attempt_convert_uuid_bytes_to_hex(payload[0])
methods = auth_plugins.convert_integer_to_method_list(payload[1])
- project_id = cls.convert_uuid_bytes_to_hex(payload[2])
+ project_id = cls.attempt_convert_uuid_bytes_to_hex(payload[2])
expires_at_str = cls._convert_int_to_time_string(payload[3])
audit_ids = list(map(provider.base64_encode, payload[4]))
trust_id = cls.convert_uuid_bytes_to_hex(payload[5])
@@ -486,10 +525,19 @@ class TrustScopedPayload(BasePayload):
trust_id)
-class FederatedPayload(BasePayload):
+class FederatedUnscopedPayload(BasePayload):
version = 4
@classmethod
+ def pack_group_id(cls, group_dict):
+ return cls.attempt_convert_uuid_hex_to_bytes(group_dict['id'])
+
+ @classmethod
+ def unpack_group_id(cls, group_id_in_bytes):
+ group_id = cls.attempt_convert_uuid_bytes_to_hex(group_id_in_bytes)
+ return {'id': group_id}
+
+ @classmethod
def assemble(cls, user_id, methods, expires_at, audit_ids, federated_info):
"""Assemble the payload of a federated token.
@@ -503,24 +551,24 @@ class FederatedPayload(BasePayload):
:returns: the payload of a federated token
"""
- def pack_group_ids(group_dict):
- return cls.convert_uuid_hex_to_bytes(group_dict['id'])
b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
- b_group_ids = map(pack_group_ids, federated_info['group_ids'])
+ b_group_ids = list(map(cls.pack_group_id,
+ federated_info['group_ids']))
b_idp_id = cls.attempt_convert_uuid_hex_to_bytes(
federated_info['idp_id'])
protocol_id = federated_info['protocol_id']
expires_at_int = cls._convert_time_string_to_int(expires_at)
- b_audit_ids = map(provider.random_urlsafe_str_to_bytes, audit_ids)
+ b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
+ audit_ids))
return (b_user_id, methods, b_group_ids, b_idp_id, protocol_id,
expires_at_int, b_audit_ids)
@classmethod
def disassemble(cls, payload):
- """Validate a federated paylod.
+ """Validate a federated payload.
:param token_string: a string representing the token
:return: a tuple containing the user_id, auth methods, audit_ids, and
@@ -529,17 +577,81 @@ class FederatedPayload(BasePayload):
federated domain ID
"""
- def unpack_group_ids(group_id_in_bytes):
- group_id = cls.convert_uuid_bytes_to_hex(group_id_in_bytes)
- return {'id': group_id}
user_id = cls.attempt_convert_uuid_bytes_to_hex(payload[0])
methods = auth_plugins.convert_integer_to_method_list(payload[1])
- group_ids = map(unpack_group_ids, payload[2])
+ group_ids = list(map(cls.unpack_group_id, payload[2]))
idp_id = cls.attempt_convert_uuid_bytes_to_hex(payload[3])
protocol_id = payload[4]
expires_at_str = cls._convert_int_to_time_string(payload[5])
- audit_ids = map(provider.base64_encode, payload[6])
+ audit_ids = list(map(provider.base64_encode, payload[6]))
federated_info = dict(group_ids=group_ids, idp_id=idp_id,
protocol_id=protocol_id)
return (user_id, methods, expires_at_str, audit_ids, federated_info)
+
+
+class FederatedScopedPayload(FederatedUnscopedPayload):
+ version = None
+
+ @classmethod
+ def assemble(cls, user_id, methods, scope_id, expires_at, audit_ids,
+ federated_info):
+ """Assemble the project-scoped payload of a federated token.
+
+ :param user_id: ID of the user in the token request
+ :param methods: list of authentication methods used
+ :param scope_id: ID of the project or domain ID to scope to
+ :param expires_at: datetime of the token's expiration
+ :param audit_ids: list of the token's audit IDs
+ :param federated_info: dictionary containing the identity provider ID,
+ protocol ID, federated domain ID and group IDs
+ :returns: the payload of a federated token
+
+ """
+ b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
+ methods = auth_plugins.convert_method_list_to_integer(methods)
+ b_scope_id = cls.attempt_convert_uuid_hex_to_bytes(scope_id)
+ b_group_ids = list(map(cls.pack_group_id,
+ federated_info['group_ids']))
+ b_idp_id = cls.attempt_convert_uuid_hex_to_bytes(
+ federated_info['idp_id'])
+ protocol_id = federated_info['protocol_id']
+ expires_at_int = cls._convert_time_string_to_int(expires_at)
+ b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
+ audit_ids))
+
+ return (b_user_id, methods, b_scope_id, b_group_ids, b_idp_id,
+ protocol_id, expires_at_int, b_audit_ids)
+
+ @classmethod
+ def disassemble(cls, payload):
+ """Validate a project-scoped federated payload.
+
+ :param token_string: a string representing the token
+ :returns: a tuple containing the user_id, auth methods, scope_id,
+ expiration time (as str), audit_ids, and a dictionary
+                  containing federated information such as the identity
+ provider ID, the protocol ID, the federated domain ID and
+ group IDs
+
+ """
+ user_id = cls.attempt_convert_uuid_bytes_to_hex(payload[0])
+ methods = auth_plugins.convert_integer_to_method_list(payload[1])
+ scope_id = cls.attempt_convert_uuid_bytes_to_hex(payload[2])
+ group_ids = list(map(cls.unpack_group_id, payload[3]))
+ idp_id = cls.attempt_convert_uuid_bytes_to_hex(payload[4])
+ protocol_id = payload[5]
+ expires_at_str = cls._convert_int_to_time_string(payload[6])
+ audit_ids = list(map(provider.base64_encode, payload[7]))
+ federated_info = dict(idp_id=idp_id, protocol_id=protocol_id,
+ group_ids=group_ids)
+ return (user_id, methods, scope_id, expires_at_str, audit_ids,
+ federated_info)
+
+
+class FederatedProjectScopedPayload(FederatedScopedPayload):
+ version = 5
+
+
+class FederatedDomainScopedPayload(FederatedScopedPayload):
+ version = 6
diff --git a/keystone-moon/keystone/token/providers/fernet/utils.py b/keystone-moon/keystone/token/providers/fernet/utils.py
index 56624ee5..4235eda8 100644
--- a/keystone-moon/keystone/token/providers/fernet/utils.py
+++ b/keystone-moon/keystone/token/providers/fernet/utils.py
@@ -59,8 +59,8 @@ def _convert_to_integers(id_value):
try:
id_int = int(id_value)
except ValueError as e:
- msg = ('Unable to convert Keystone user or group ID. Error: %s', e)
- LOG.error(msg)
+ msg = _LE('Unable to convert Keystone user or group ID. Error: %s')
+ LOG.error(msg, e)
raise
return id_int
@@ -174,11 +174,16 @@ def rotate_keys(keystone_user_id=None, keystone_group_id=None):
for filename in os.listdir(CONF.fernet_tokens.key_repository):
path = os.path.join(CONF.fernet_tokens.key_repository, str(filename))
if os.path.isfile(path):
- key_files[int(filename)] = path
+ try:
+ key_id = int(filename)
+ except ValueError:
+ pass
+ else:
+ key_files[key_id] = path
LOG.info(_LI('Starting key rotation with %(count)s key files: %(list)s'), {
'count': len(key_files),
- 'list': key_files.values()})
+ 'list': list(key_files.values())})
# determine the number of the new primary key
current_primary_key = max(key_files.keys())
@@ -199,20 +204,24 @@ def rotate_keys(keystone_user_id=None, keystone_group_id=None):
# add a new key to the rotation, which will be the *next* primary
_create_new_key(keystone_user_id, keystone_group_id)
+ max_active_keys = CONF.fernet_tokens.max_active_keys
# check for bad configuration
- if CONF.fernet_tokens.max_active_keys < 1:
+ if max_active_keys < 1:
LOG.warning(_LW(
'[fernet_tokens] max_active_keys must be at least 1 to maintain a '
'primary key.'))
- CONF.fernet_tokens.max_active_keys = 1
+ max_active_keys = 1
# purge excess keys
- keys = sorted(key_files.keys())
- excess_keys = (
- keys[:len(key_files) - CONF.fernet_tokens.max_active_keys + 1])
- LOG.info(_LI('Excess keys to purge: %s'), excess_keys)
- for i in excess_keys:
- os.remove(key_files[i])
+
+ # Note that key_files doesn't contain the new active key that was created,
+ # only the old active keys.
+ keys = sorted(key_files.keys(), reverse=True)
+ while len(keys) > (max_active_keys - 1):
+ index_to_purge = keys.pop()
+ key_to_purge = key_files[index_to_purge]
+ LOG.info(_LI('Excess key to purge: %s'), key_to_purge)
+ os.remove(key_to_purge)
def load_keys():
@@ -232,12 +241,25 @@ def load_keys():
path = os.path.join(CONF.fernet_tokens.key_repository, str(filename))
if os.path.isfile(path):
with open(path, 'r') as key_file:
- keys[int(filename)] = key_file.read()
-
- LOG.info(_LI(
- 'Loaded %(count)s encryption keys from: %(dir)s'), {
- 'count': len(keys),
- 'dir': CONF.fernet_tokens.key_repository})
+ try:
+ key_id = int(filename)
+ except ValueError:
+ pass
+ else:
+ keys[key_id] = key_file.read()
+
+ if len(keys) != CONF.fernet_tokens.max_active_keys:
+ # If there haven't been enough key rotations to reach max_active_keys,
+ # or if the configured value of max_active_keys has changed since the
+ # last rotation, then reporting the discrepancy might be useful. Once
+ # the number of keys matches max_active_keys, this log entry is too
+ # repetitive to be useful.
+ LOG.info(_LI(
+ 'Loaded %(count)d encryption keys (max_active_keys=%(max)d) from: '
+ '%(dir)s'), {
+ 'count': len(keys),
+ 'max': CONF.fernet_tokens.max_active_keys,
+ 'dir': CONF.fernet_tokens.key_repository})
# return the encryption_keys, sorted by key number, descending
return [keys[x] for x in sorted(keys.keys(), reverse=True)]
diff --git a/keystone-moon/keystone/token/providers/pki.py b/keystone-moon/keystone/token/providers/pki.py
index 61b42817..af8dc739 100644
--- a/keystone-moon/keystone/token/providers/pki.py
+++ b/keystone-moon/keystone/token/providers/pki.py
@@ -48,6 +48,14 @@ class Provider(common.BaseProvider):
raise exception.UnexpectedError(_(
'Unable to sign token.'))
+ @property
+ def _supports_bind_authentication(self):
+ """Return if the token provider supports bind authentication methods.
+
+ :returns: True
+ """
+ return True
+
def needs_persistence(self):
"""Should the token be written to a backend."""
return True
diff --git a/keystone-moon/keystone/token/providers/pkiz.py b/keystone-moon/keystone/token/providers/pkiz.py
index b6f2944d..b4e31918 100644
--- a/keystone-moon/keystone/token/providers/pkiz.py
+++ b/keystone-moon/keystone/token/providers/pkiz.py
@@ -46,6 +46,14 @@ class Provider(common.BaseProvider):
LOG.exception(ERROR_MESSAGE)
raise exception.UnexpectedError(ERROR_MESSAGE)
+ @property
+ def _supports_bind_authentication(self):
+ """Return if the token provider supports bind authentication methods.
+
+ :returns: True
+ """
+ return True
+
def needs_persistence(self):
"""Should the token be written to a backend."""
return True
diff --git a/keystone-moon/keystone/token/providers/uuid.py b/keystone-moon/keystone/token/providers/uuid.py
index 15118d82..f9a91617 100644
--- a/keystone-moon/keystone/token/providers/uuid.py
+++ b/keystone-moon/keystone/token/providers/uuid.py
@@ -28,6 +28,14 @@ class Provider(common.BaseProvider):
def _get_token_id(self, token_data):
return uuid.uuid4().hex
+ @property
+ def _supports_bind_authentication(self):
+ """Return if the token provider supports bind authentication methods.
+
+ :returns: True
+ """
+ return True
+
def needs_persistence(self):
"""Should the token be written to a backend."""
return True
diff --git a/keystone-moon/keystone/token/utils.py b/keystone-moon/keystone/token/utils.py
new file mode 100644
index 00000000..96a09246
--- /dev/null
+++ b/keystone-moon/keystone/token/utils.py
@@ -0,0 +1,27 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystoneclient.common import cms
+from oslo_config import cfg
+
+
+def generate_unique_id(token_id):
+ """Return a unique ID for a token.
+
+ The returned value is useful as the primary key of a database table,
+ memcache store, or other lookup table.
+
+    :returns: Given a PKI token, returns its hashed value. Otherwise,
+ returns the passed-in value (such as a UUID token ID or an
+ existing hash).
+ """
+ return cms.cms_hash_token(token_id, mode=cfg.CONF.token.hash_algorithm)
diff --git a/keystone-moon/keystone/trust/backends/sql.py b/keystone-moon/keystone/trust/backends/sql.py
index 4f5ee2e5..95b18d40 100644
--- a/keystone-moon/keystone/trust/backends/sql.py
+++ b/keystone-moon/keystone/trust/backends/sql.py
@@ -16,6 +16,7 @@ import time
from oslo_log import log
from oslo_utils import timeutils
+from six.moves import range
from keystone.common import sql
from keystone import exception
@@ -135,15 +136,15 @@ class Trust(trust.Driver):
query = query.filter_by(deleted_at=None)
ref = query.first()
if ref is None:
- return None
+ raise exception.TrustNotFound(trust_id=trust_id)
if ref.expires_at is not None and not deleted:
now = timeutils.utcnow()
if now > ref.expires_at:
- return None
+ raise exception.TrustNotFound(trust_id=trust_id)
# Do not return trusts that can't be used anymore
if ref.remaining_uses is not None and not deleted:
if ref.remaining_uses <= 0:
- return None
+ raise exception.TrustNotFound(trust_id=trust_id)
trust_dict = ref.to_dict()
self._add_roles(trust_id, session, trust_dict)
diff --git a/keystone-moon/keystone/trust/controllers.py b/keystone-moon/keystone/trust/controllers.py
index 60e34ccd..39cf0110 100644
--- a/keystone-moon/keystone/trust/controllers.py
+++ b/keystone-moon/keystone/trust/controllers.py
@@ -16,18 +16,18 @@ import uuid
from oslo_config import cfg
from oslo_log import log
+from oslo_log import versionutils
from oslo_utils import timeutils
import six
from keystone import assignment
from keystone.common import controller
from keystone.common import dependency
+from keystone.common import utils
from keystone.common import validation
from keystone import exception
from keystone.i18n import _
-from keystone.models import token_model
from keystone import notifications
-from keystone.openstack.common import versionutils
from keystone.trust import schema
@@ -63,19 +63,15 @@ class TrustV3(controller.V3Controller):
return super(TrustV3, cls).base_url(context, path=path)
def _get_user_id(self, context):
- if 'token_id' in context:
- token_id = context['token_id']
- token_data = self.token_provider_api.validate_token(token_id)
- token_ref = token_model.KeystoneToken(token_id=token_id,
- token_data=token_data)
- return token_ref.user_id
- return None
+ try:
+ token_ref = utils.get_token_ref(context)
+ except exception.Unauthorized:
+ return None
+ return token_ref.user_id
def get_trust(self, context, trust_id):
user_id = self._get_user_id(context)
trust = self.trust_api.get_trust(trust_id)
- if not trust:
- raise exception.TrustNotFound(trust_id=trust_id)
_trustor_trustee_only(trust, user_id)
self._fill_in_roles(context, trust,
self.role_api.list_roles())
@@ -83,7 +79,7 @@ class TrustV3(controller.V3Controller):
def _fill_in_roles(self, context, trust, all_roles):
if trust.get('expires_at') is not None:
- trust['expires_at'] = (timeutils.isotime
+ trust['expires_at'] = (utils.isotime
(trust['expires_at'],
subsecond=True))
@@ -126,15 +122,12 @@ class TrustV3(controller.V3Controller):
@controller.protected()
@validation.validated(schema.trust_create, 'trust')
- def create_trust(self, context, trust=None):
+ def create_trust(self, context, trust):
"""Create a new trust.
The user creating the trust must be the trustor.
"""
- if not trust:
- raise exception.ValidationError(attribute='trust',
- target='request')
auth_context = context.get('environment',
{}).get('KEYSTONE_AUTH_CONTEXT', {})
@@ -206,15 +199,16 @@ class TrustV3(controller.V3Controller):
if not expiration_date.endswith('Z'):
expiration_date += 'Z'
try:
- return timeutils.parse_isotime(expiration_date)
+ expiration_time = timeutils.parse_isotime(expiration_date)
except ValueError:
raise exception.ValidationTimeStampError()
+ if timeutils.is_older_than(expiration_time, 0):
+ raise exception.ValidationExpirationError()
+ return expiration_time
def _check_role_for_trust(self, context, trust_id, role_id):
"""Checks if a role has been assigned to a trust."""
trust = self.trust_api.get_trust(trust_id)
- if not trust:
- raise exception.TrustNotFound(trust_id=trust_id)
user_id = self._get_user_id(context)
_trustor_trustee_only(trust, user_id)
if not any(role['id'] == role_id for role in trust['roles']):
@@ -247,7 +241,7 @@ class TrustV3(controller.V3Controller):
if 'roles' in trust:
del trust['roles']
if trust.get('expires_at') is not None:
- trust['expires_at'] = (timeutils.isotime
+ trust['expires_at'] = (utils.isotime
(trust['expires_at'],
subsecond=True))
return TrustV3.wrap_collection(context, trusts)
@@ -255,9 +249,6 @@ class TrustV3(controller.V3Controller):
@controller.protected()
def delete_trust(self, context, trust_id):
trust = self.trust_api.get_trust(trust_id)
- if not trust:
- raise exception.TrustNotFound(trust_id=trust_id)
-
user_id = self._get_user_id(context)
_admin_trustor_only(context, trust, user_id)
initiator = notifications._get_request_audit_info(context)
@@ -266,8 +257,6 @@ class TrustV3(controller.V3Controller):
@controller.protected()
def list_roles_for_trust(self, context, trust_id):
trust = self.get_trust(context, trust_id)['trust']
- if not trust:
- raise exception.TrustNotFound(trust_id=trust_id)
user_id = self._get_user_id(context)
_trustor_trustee_only(trust, user_id)
return {'roles': trust['roles'],
diff --git a/keystone-moon/keystone/trust/core.py b/keystone-moon/keystone/trust/core.py
index de6b6d85..e091ff93 100644
--- a/keystone-moon/keystone/trust/core.py
+++ b/keystone-moon/keystone/trust/core.py
@@ -12,13 +12,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Main entry point into the Identity service."""
+"""Main entry point into the Trust service."""
import abc
from oslo_config import cfg
from oslo_log import log
import six
+from six.moves import zip
from keystone.common import dependency
from keystone.common import manager
@@ -41,6 +42,9 @@ class Manager(manager.Manager):
dynamically calls the backend.
"""
+
+ driver_namespace = 'keystone.trust'
+
_TRUST = "OS-TRUST:trust"
def __init__(self):
@@ -55,9 +59,9 @@ class Manager(manager.Manager):
if not (0 < redelegation_depth <= max_redelegation_count):
raise exception.Forbidden(
_('Remaining redelegation depth of %(redelegation_depth)d'
- ' out of allowed range of [0..%(max_count)d]'),
- redelegation_depth=redelegation_depth,
- max_count=max_redelegation_count)
+ ' out of allowed range of [0..%(max_count)d]') %
+ {'redelegation_depth': redelegation_depth,
+ 'max_count': max_redelegation_count})
# remaining_uses is None
remaining_uses = trust.get('remaining_uses')
@@ -139,13 +143,14 @@ class Manager(manager.Manager):
if requested_count and requested_count > max_redelegation_count:
raise exception.Forbidden(
_('Requested redelegation depth of %(requested_count)d '
- 'is greater than allowed %(max_count)d'),
- requested_count=requested_count,
- max_count=max_redelegation_count)
+ 'is greater than allowed %(max_count)d') %
+ {'requested_count': requested_count,
+ 'max_count': max_redelegation_count})
# Decline remaining_uses
- if 'remaining_uses' in trust:
- exception.ValidationError(_('remaining_uses must not be set '
- 'if redelegation is allowed'))
+ if trust.get('remaining_uses') is not None:
+ raise exception.ValidationError(
+ _('remaining_uses must not be set if redelegation is '
+ 'allowed'))
if redelegated_trust:
trust['redelegated_trust_id'] = redelegated_trust['id']
@@ -179,9 +184,6 @@ class Manager(manager.Manager):
Recursively remove given and redelegated trusts
"""
trust = self.driver.get_trust(trust_id)
- if not trust:
- raise exception.TrustNotFound(trust_id)
-
trusts = self.driver.list_trusts_for_trustor(
trust['trustor_user_id'])
diff --git a/keystone-moon/keystone/trust/schema.py b/keystone-moon/keystone/trust/schema.py
index 087cd1e9..673b786b 100644
--- a/keystone-moon/keystone/trust/schema.py
+++ b/keystone-moon/keystone/trust/schema.py
@@ -15,8 +15,11 @@ from keystone.common.validation import parameter_types
_trust_properties = {
- 'trustor_user_id': parameter_types.id_string,
- 'trustee_user_id': parameter_types.id_string,
+ # NOTE(lbragstad): These are set as external_id_string because they have
+ # the ability to be read as LDAP user identifiers, which could be something
+ # other than uuid.
+ 'trustor_user_id': parameter_types.external_id_string,
+ 'trustee_user_id': parameter_types.external_id_string,
'impersonation': parameter_types.boolean,
'project_id': validation.nullable(parameter_types.id_string),
'remaining_uses': {
diff --git a/keystone-moon/rally-jobs/README.rst b/keystone-moon/rally-jobs/README.rst
new file mode 100644
index 00000000..2c4a4848
--- /dev/null
+++ b/keystone-moon/rally-jobs/README.rst
@@ -0,0 +1,5 @@
+This directory contains rally benchmark scenarios to be run by OpenStack CI.
+
+
+* more about rally: https://wiki.openstack.org/wiki/Rally
+* how to add rally-gates: https://wiki.openstack.org/wiki/Rally/RallyGates
diff --git a/keystone-moon/rally-jobs/keystone.yaml b/keystone-moon/rally-jobs/keystone.yaml
new file mode 100644
index 00000000..9e656aad
--- /dev/null
+++ b/keystone-moon/rally-jobs/keystone.yaml
@@ -0,0 +1,167 @@
+---
+
+ KeystoneBasic.create_user:
+ -
+ args:
+ name_length: 10
+ runner:
+ type: "constant"
+ times: 100
+ concurrency: 10
+ sla:
+ failure_rate:
+ max: 0
+
+ KeystoneBasic.create_delete_user:
+ -
+ args:
+ name_length: 10
+ runner:
+ type: "constant"
+ times: 100
+ concurrency: 10
+ sla:
+ failure_rate:
+ max: 0
+
+ KeystoneBasic.create_and_list_users:
+ -
+ args:
+ name_length: 10
+ runner:
+ type: "constant"
+ times: 100
+ concurrency: 10
+ sla:
+ failure_rate:
+ max: 0
+
+ KeystoneBasic.create_user_update_password:
+ -
+ args:
+ name_length: 10
+ password_length: 10
+ runner:
+ type: "constant"
+ times: 100
+ concurrency: 10
+ sla:
+ failure_rate:
+ max: 0
+
+ KeystoneBasic.create_and_list_tenants:
+ -
+ args:
+ name_length: 10
+ runner:
+ type: "constant"
+ times: 100
+ concurrency: 10
+ sla:
+ failure_rate:
+ max: 0
+
+ KeystoneBasic.get_entities:
+ -
+ runner:
+ type: "constant"
+ times: 100
+ concurrency: 10
+ sla:
+ failure_rate:
+ max: 0
+
+ KeystoneBasic.add_and_remove_user_role:
+ -
+ runner:
+ type: "constant"
+ times: 100
+ concurrency: 10
+ context:
+ users:
+ tenants: 5
+ users_per_tenant: 4
+ sla:
+ failure_rate:
+ max: 0
+
+ KeystoneBasic.create_and_delete_role:
+ -
+ runner:
+ type: "constant"
+ times: 100
+ concurrency: 10
+ sla:
+ failure_rate:
+ max: 0
+
+ KeystoneBasic.create_add_and_list_user_roles:
+ -
+ runner:
+ type: "constant"
+ times: 100
+ concurrency: 10
+ context:
+ users:
+ tenants: 5
+ users_per_tenant: 4
+ sla:
+ failure_rate:
+ max: 0
+
+ KeystoneBasic.create_tenant:
+ -
+ args:
+ name_length: 10
+ runner:
+ type: "constant"
+ times: 50
+ concurrency: 10
+ sla:
+ failure_rate:
+ max: 0
+
+ KeystoneBasic.create_tenant_with_users:
+ -
+ args:
+ name_length: 10
+ users_per_tenant: 10
+ runner:
+ type: "constant"
+ times: 50
+ concurrency: 10
+ sla:
+ failure_rate:
+ max: 0
+
+ KeystoneBasic.create_update_and_delete_tenant:
+ -
+ args:
+ name_length: 10
+ runner:
+ type: "constant"
+ times: 50
+ concurrency: 10
+ sla:
+ failure_rate:
+ max: 0
+
+ KeystoneBasic.create_and_delete_service:
+ -
+ runner:
+ type: "constant"
+ times: 50
+ concurrency: 10
+ sla:
+ failure_rate:
+ max: 0
+
+ KeystoneBasic.create_and_list_services:
+ -
+ runner:
+ type: "constant"
+ times: 50
+ concurrency: 10
+ sla:
+ failure_rate:
+ max: 0
diff --git a/keystone-moon/requirements.txt b/keystone-moon/requirements.txt
index 14236443..d83eed6a 100644
--- a/keystone-moon/requirements.txt
+++ b/keystone-moon/requirements.txt
@@ -2,36 +2,36 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-pbr>=0.6,!=0.7,<1.0
+pbr<2.0,>=1.4
WebOb>=1.2.3
-eventlet>=0.16.1
+eventlet>=0.17.4
greenlet>=0.3.2
-netaddr>=0.7.12
PasteDeploy>=1.5.0
Paste
-Routes>=1.12.3,!=2.0
-cryptography>=0.4 # Apache-2.0
+Routes!=2.0,!=2.1,>=1.12.3;python_version=='2.7'
+Routes!=2.0,>=1.12.3;python_version!='2.7'
+cryptography>=0.9.1 # Apache-2.0
six>=1.9.0
-SQLAlchemy>=0.9.7,<=0.9.99
-sqlalchemy-migrate>=0.9.5
+SQLAlchemy<1.1.0,>=0.9.7
+sqlalchemy-migrate>=0.9.6
+stevedore>=1.5.0 # Apache-2.0
passlib
-iso8601>=0.1.9
-python-keystoneclient>=1.1.0
-keystonemiddleware>=1.0.0
-oslo.concurrency>=1.4.1 # Apache-2.0
-oslo.config>=1.9.0 # Apache-2.0
-oslo.messaging>=1.6.0 # Apache-2.0
-oslo.db>=1.5.0 # Apache-2.0
-oslo.i18n>=1.3.0 # Apache-2.0
-oslo.log>=0.4.0 # Apache-2.0
-oslo.middleware>=0.3.0 # Apache-2.0
-oslo.policy>=0.3.0 # Apache-2.0
-oslo.serialization>=1.2.0 # Apache-2.0
-oslo.utils>=1.2.0 # Apache-2.0
+python-keystoneclient>=1.6.0
+keystonemiddleware>=2.0.0
+oslo.concurrency>=2.3.0 # Apache-2.0
+oslo.config>=2.1.0 # Apache-2.0
+oslo.messaging!=1.17.0,!=1.17.1,>=1.16.0 # Apache-2.0
+oslo.db>=2.0 # Apache-2.0
+oslo.i18n>=1.5.0 # Apache-2.0
+oslo.log>=1.8.0 # Apache-2.0
+oslo.middleware>=2.4.0 # Apache-2.0
+oslo.policy>=0.5.0 # Apache-2.0
+oslo.serialization>=1.4.0 # Apache-2.0
+oslo.service>=0.6.0 # Apache-2.0
+oslo.utils>=2.0.0 # Apache-2.0
oauthlib>=0.6
-pysaml2
-dogpile.cache>=0.5.3
-jsonschema>=2.0.0,<3.0.0
+pysaml2>=2.4.0
+dogpile.cache>=0.5.4
+jsonschema!=2.5.0,<3.0.0,>=2.0.0
pycadf>=0.8.0
-posix_ipc
msgpack-python>=0.4.0
diff --git a/keystone-moon/setup.cfg b/keystone-moon/setup.cfg
index e646480c..4acc4fee 100644
--- a/keystone-moon/setup.cfg
+++ b/keystone-moon/setup.cfg
@@ -1,6 +1,6 @@
[metadata]
name = keystone
-version = 2015.1
+version = 8.0.0
summary = OpenStack Identity
description-file =
README.rst
@@ -20,9 +20,17 @@ classifier =
[files]
packages =
keystone
-scripts =
- bin/keystone-all
- bin/keystone-manage
+
+[extras]
+ldap =
+ python-ldap>=2.4:python_version=='2.7'
+ ldappool>=1.0 # MPL
+memcache =
+ python-memcached>=1.56
+mongodb =
+ pymongo>=3.0.2
+bandit =
+ bandit>=0.13.2
[global]
setup-hooks =
@@ -55,15 +63,141 @@ output_file = keystone/locale/keystone.pot
copyright_holder = OpenStack Foundation
msgid_bugs_address = https://bugs.launchpad.net/keystone
-# NOTE(dstanek): Uncomment the [pbr] section below and remove the ext.apidoc
-# Sphinx extension when https://launchpad.net/bugs/1260495 is fixed.
[pbr]
warnerrors = True
-#autodoc_tree_index_modules = True
-#autodoc_tree_root = ./keystone
+autodoc_tree_index_modules = True
[entry_points]
+console_scripts =
+ keystone-all = keystone.cmd.all:main
+ keystone-manage = keystone.cmd.manage:main
+
+wsgi_scripts =
+ keystone-wsgi-admin = keystone.server.wsgi:initialize_admin_application
+ keystone-wsgi-public = keystone.server.wsgi:initialize_public_application
+
+keystone.assignment =
+ ldap = keystone.assignment.backends.ldap:Assignment
+ sql = keystone.assignment.backends.sql:Assignment
+
+keystone.auth.external =
+ default = keystone.auth.plugins.external:DefaultDomain
+ DefaultDomain = keystone.auth.plugins.external:DefaultDomain
+ Domain = keystone.auth.plugins.external:Domain
+
+keystone.auth.kerberos =
+ default = keystone.auth.plugins.external:KerberosDomain
+
+keystone.auth.oauth1 =
+ default = keystone.auth.plugins.oauth1:OAuth
+
+keystone.auth.openid =
+ default = keystone.auth.plugins.mapped:Mapped
+
+keystone.auth.password =
+ default = keystone.auth.plugins.password:Password
+
+keystone.auth.saml2 =
+ default = keystone.auth.plugins.mapped:Mapped
+
+keystone.auth.token =
+ default = keystone.auth.plugins.token:Token
+
+keystone.auth.x509 =
+ default = keystone.auth.plugins.mapped:Mapped
+
+keystone.catalog =
+ kvs = keystone.catalog.backends.kvs:Catalog
+ sql = keystone.catalog.backends.sql:Catalog
+ templated = keystone.catalog.backends.templated:Catalog
+ endpoint_filter.sql = keystone.contrib.endpoint_filter.backends.catalog_sql:EndpointFilterCatalog
+
+keystone.credential =
+ sql = keystone.credential.backends.sql:Credential
+
+keystone.identity =
+ ldap = keystone.identity.backends.ldap:Identity
+ sql = keystone.identity.backends.sql:Identity
+
+keystone.identity.id_generator =
+ sha256 = keystone.identity.id_generators.sha256:Generator
+
+keystone.identity.id_mapping =
+ sql = keystone.identity.mapping_backends.sql:Mapping
+
+keystone.policy =
+ rules = keystone.policy.backends.rules:Policy
+ sql = keystone.policy.backends.sql:Policy
+
+keystone.resource =
+ ldap = keystone.resource.backends.ldap:Resource
+ sql = keystone.resource.backends.sql:Resource
+
+keystone.resource.domain_config =
+ sql = keystone.resource.config_backends.sql:DomainConfig
+
+keystone.role =
+ ldap = keystone.assignment.role_backends.ldap:Role
+ sql = keystone.assignment.role_backends.sql:Role
+
+keystone.token.persistence =
+ kvs = keystone.token.persistence.backends.kvs:Token
+ memcache = keystone.token.persistence.backends.memcache:Token
+ memcache_pool = keystone.token.persistence.backends.memcache_pool:Token
+ sql = keystone.token.persistence.backends.sql:Token
+
+keystone.token.provider =
+ fernet = keystone.token.providers.fernet:Provider
+ uuid = keystone.token.providers.uuid:Provider
+ pki = keystone.token.providers.pki:Provider
+ pkiz = keystone.token.providers.pkiz:Provider
+
+keystone.trust =
+ sql = keystone.trust.backends.sql:Trust
+
+keystone.endpoint_filter =
+ sql = keystone.contrib.endpoint_filter.backends.sql:EndpointFilter
+
+keystone.endpoint_policy =
+ sql = keystone.endpoint_policy.backends.sql:EndpointPolicy
+
+keystone.federation =
+ sql = keystone.contrib.federation.backends.sql:Federation
+
+keystone.oauth1 =
+ sql = keystone.contrib.oauth1.backends.sql:OAuth1
+
+keystone.revoke =
+ kvs = keystone.contrib.revoke.backends.kvs:Revoke
+ sql = keystone.contrib.revoke.backends.sql:Revoke
+
oslo.config.opts =
keystone = keystone.common.config:list_opts
keystone.notifications = keystone.notifications:list_opts
- keystone.openstack.common.eventlet_backdoor = keystone.openstack.common.eventlet_backdoor:list_opts
+
+paste.filter_factory =
+ admin_token_auth = keystone.middleware:AdminTokenAuthMiddleware.factory
+ build_auth_context = keystone.middleware:AuthContextMiddleware.factory
+ crud_extension = keystone.contrib.admin_crud:CrudExtension.factory
+ debug = keystone.common.wsgi:Debug.factory
+ endpoint_filter_extension = keystone.contrib.endpoint_filter.routers:EndpointFilterExtension.factory
+ ec2_extension = keystone.contrib.ec2:Ec2Extension.factory
+ ec2_extension_v3 = keystone.contrib.ec2:Ec2ExtensionV3.factory
+ federation_extension = keystone.contrib.federation.routers:FederationExtension.factory
+ json_body = keystone.middleware:JsonBodyMiddleware.factory
+ oauth1_extension = keystone.contrib.oauth1.routers:OAuth1Extension.factory
+ request_id = oslo_middleware:RequestId.factory
+ revoke_extension = keystone.contrib.revoke.routers:RevokeExtension.factory
+ s3_extension = keystone.contrib.s3:S3Extension.factory
+ simple_cert_extension = keystone.contrib.simple_cert:SimpleCertExtension.factory
+ sizelimit = oslo_middleware.sizelimit:RequestBodySizeLimiter.factory
+ token_auth = keystone.middleware:TokenAuthMiddleware.factory
+ url_normalize = keystone.middleware:NormalizingFilter.factory
+ user_crud_extension = keystone.contrib.user_crud:CrudExtension.factory
+
+paste.app_factory =
+ admin_service = keystone.service:admin_app_factory
+ admin_version_service = keystone.service:admin_version_app_factory
+ public_service = keystone.service:public_app_factory
+ public_version_service = keystone.service:public_version_app_factory
+ service_v3 = keystone.service:v3_app_factory
diff --git a/keystone-moon/setup.py b/keystone-moon/setup.py
index 73637574..d8080d05 100644
--- a/keystone-moon/setup.py
+++ b/keystone-moon/setup.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -26,5 +25,5 @@ except ImportError:
pass
setuptools.setup(
- setup_requires=['pbr'],
+ setup_requires=['pbr>=1.3'],
pbr=True)
diff --git a/keystone-moon/test-requirements.txt b/keystone-moon/test-requirements.txt
index 170cb41c..326145c2 100644
--- a/keystone-moon/test-requirements.txt
+++ b/keystone-moon/test-requirements.txt
@@ -2,35 +2,20 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-hacking>=0.10.0,<0.11
+hacking<0.11,>=0.10.0
bashate>=0.2 # Apache-2.0
-# Optional backend: SQL
-pysqlite
-
-# Optional backend: Memcache
-python-memcached>=1.48
-
-# Optional dogpile backend: MongoDB
-pymongo>=2.5
-
-# Optional backend: LDAP
-# authenticate against an existing LDAP server
-python-ldap>=2.4
-ldappool>=1.0 # MPL
-
-# Testing
# computes code coverage percentages
coverage>=3.6
# fixture stubbing
-fixtures>=0.3.14
+fixtures>=1.3.1
# xml parsing
lxml>=2.3
# mock object framework
-mock>=1.0
-oslotest>=1.2.0 # Apache-2.0
+mock>=1.2
+oslotest>=1.10.0 # Apache-2.0
# required to build documentation
-sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3
+sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2
# test wsgi apps without starting an http server
WebTest>=2.0
@@ -38,13 +23,14 @@ WebTest>=2.0
# discussion: http://lists.openstack.org/pipermail/openstack-dev/2013-July/012484.html
#mox>=0.5.3
-discover
python-subunit>=0.0.18
testrepository>=0.0.18
-testtools>=0.9.36,!=1.2.0
+testtools>=1.4.0
# For documentation
-oslosphinx>=2.2.0 # Apache-2.0
+oslosphinx>=2.5.0 # Apache-2.0
-tempest-lib>=0.3.0
+tempest-lib>=0.6.1
+# Functional tests.
+requests>=2.5.2
diff --git a/keystone-moon/tools/pretty_tox.sh b/keystone-moon/tools/pretty_tox.sh
index 01b67a8d..01b67a8d 100644..100755
--- a/keystone-moon/tools/pretty_tox.sh
+++ b/keystone-moon/tools/pretty_tox.sh
diff --git a/keystone-moon/tools/sample_data.sh b/keystone-moon/tools/sample_data.sh
index 55ab9d21..bb1eada8 100755
--- a/keystone-moon/tools/sample_data.sh
+++ b/keystone-moon/tools/sample_data.sh
@@ -14,14 +14,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Sample initial data for Keystone using python-keystoneclient
+# Sample initial data for Keystone using python-openstackclient
#
# This script is based on the original DevStack keystone_data.sh script.
#
# It demonstrates how to bootstrap Keystone with an administrative user
-# using the OS_SERVICE_TOKEN and OS_SERVICE_ENDPOINT environment variables
-# and the administrative API. It will get the admin_token (OS_SERVICE_TOKEN)
-# and admin_port from keystone.conf if available.
+# using the OS_TOKEN and OS_URL environment variables and the administrative
+# API. It will get the admin_token (OS_TOKEN) and admin_port from
+# keystone.conf if available.
#
# Disable creation of endpoints by setting DISABLE_ENDPOINTS environment variable.
# Use this with the Catalog Templated backend.
@@ -42,6 +42,12 @@
# environment variables. A common default password for all the services can be used by
# setting the "SERVICE_PASSWORD" environment variable.
+# Test to verify that the openstackclient is installed, if not exit
+type openstack >/dev/null 2>&1 || {
+ echo >&2 "openstackclient is not installed. Please install it to use this script. Aborting."
+ exit 1
+ }
+
ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete}
NOVA_PASSWORD=${NOVA_PASSWORD:-${SERVICE_PASSWORD:-nova}}
GLANCE_PASSWORD=${GLANCE_PASSWORD:-${SERVICE_PASSWORD:-glance}}
@@ -79,14 +85,14 @@ if [[ -r "$KEYSTONE_CONF" ]]; then
fi
fi
-export OS_SERVICE_TOKEN=${OS_SERVICE_TOKEN:-$CONFIG_SERVICE_TOKEN}
-if [[ -z "$OS_SERVICE_TOKEN" ]]; then
+export OS_TOKEN=${OS_TOKEN:-$CONFIG_SERVICE_TOKEN}
+if [[ -z "$OS_TOKEN" ]]; then
echo "No service token found."
- echo "Set OS_SERVICE_TOKEN manually from keystone.conf admin_token."
+ echo "Set OS_TOKEN manually from keystone.conf admin_token."
exit 1
fi
-export OS_SERVICE_ENDPOINT=${OS_SERVICE_ENDPOINT:-http://$CONTROLLER_PUBLIC_ADDRESS:${CONFIG_ADMIN_PORT:-35357}/v2.0}
+export OS_URL=${OS_URL:-http://$CONTROLLER_PUBLIC_ADDRESS:${CONFIG_ADMIN_PORT:-35357}/v2.0}
function get_id () {
echo `"$@" | grep ' id ' | awk '{print $4}'`
@@ -95,141 +101,139 @@ function get_id () {
#
# Default tenant
#
-DEMO_TENANT=$(get_id keystone tenant-create --name=demo \
- --description "Default Tenant")
+openstack project create demo \
+ --description "Default Tenant"
-ADMIN_USER=$(get_id keystone user-create --name=admin \
- --pass="${ADMIN_PASSWORD}")
+openstack user create admin --project demo \
+ --password "${ADMIN_PASSWORD}"
-ADMIN_ROLE=$(get_id keystone role-create --name=admin)
+openstack role create admin
-keystone user-role-add --user-id $ADMIN_USER \
- --role-id $ADMIN_ROLE \
- --tenant-id $DEMO_TENANT
+openstack role add --user admin \
+ --project demo\
+ admin
#
# Service tenant
#
-SERVICE_TENANT=$(get_id keystone tenant-create --name=service \
- --description "Service Tenant")
+openstack project create service \
+ --description "Service Tenant"
-GLANCE_USER=$(get_id keystone user-create --name=glance \
- --pass="${GLANCE_PASSWORD}")
+openstack user create glance --project service\
+ --password "${GLANCE_PASSWORD}"
-keystone user-role-add --user-id $GLANCE_USER \
- --role-id $ADMIN_ROLE \
- --tenant-id $SERVICE_TENANT
+openstack role add --user glance \
+ --project service \
+ admin
-NOVA_USER=$(get_id keystone user-create --name=nova \
- --pass="${NOVA_PASSWORD}" \
- --tenant-id $SERVICE_TENANT)
+openstack user create nova --project service\
+ --password "${NOVA_PASSWORD}"
-keystone user-role-add --user-id $NOVA_USER \
- --role-id $ADMIN_ROLE \
- --tenant-id $SERVICE_TENANT
+openstack role add --user nova \
+ --project service \
+ admin
-EC2_USER=$(get_id keystone user-create --name=ec2 \
- --pass="${EC2_PASSWORD}" \
- --tenant-id $SERVICE_TENANT)
+openstack user create ec2 --project service \
+ --password "${EC2_PASSWORD}"
-keystone user-role-add --user-id $EC2_USER \
- --role-id $ADMIN_ROLE \
- --tenant-id $SERVICE_TENANT
+openstack role add --user ec2 \
+ --project service \
+ admin
-SWIFT_USER=$(get_id keystone user-create --name=swift \
- --pass="${SWIFT_PASSWORD}" \
- --tenant-id $SERVICE_TENANT)
+openstack user create swift --project service \
+ --password "${SWIFT_PASSWORD}" \
-keystone user-role-add --user-id $SWIFT_USER \
- --role-id $ADMIN_ROLE \
- --tenant-id $SERVICE_TENANT
+openstack role add --user swift \
+ --project service \
+ admin
#
# Keystone service
#
-KEYSTONE_SERVICE=$(get_id \
-keystone service-create --name=keystone \
- --type=identity \
- --description="Keystone Identity Service")
+openstack service create --name keystone \
+ --description "Keystone Identity Service" \
+ identity
if [[ -z "$DISABLE_ENDPOINTS" ]]; then
- keystone endpoint-create --region RegionOne --service-id $KEYSTONE_SERVICE \
+ openstack endpoint create --region RegionOne \
--publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:\$(public_port)s/v2.0" \
--adminurl "http://$CONTROLLER_ADMIN_ADDRESS:\$(admin_port)s/v2.0" \
- --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:\$(public_port)s/v2.0"
+ --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:\$(public_port)s/v2.0" \
+ keystone
fi
#
# Nova service
#
-NOVA_SERVICE=$(get_id \
-keystone service-create --name=nova \
- --type=compute \
- --description="Nova Compute Service")
+openstack service create --name=nova \
+ --description="Nova Compute Service" \
+ compute
if [[ -z "$DISABLE_ENDPOINTS" ]]; then
- keystone endpoint-create --region RegionOne --service-id $NOVA_SERVICE \
+ openstack endpoint create --region RegionOne \
--publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:8774/v2/\$(tenant_id)s" \
--adminurl "http://$CONTROLLER_ADMIN_ADDRESS:8774/v2/\$(tenant_id)s" \
- --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8774/v2/\$(tenant_id)s"
+ --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8774/v2/\$(tenant_id)s" \
+ nova
fi
#
# Volume service
#
-VOLUME_SERVICE=$(get_id \
-keystone service-create --name=volume \
- --type=volume \
- --description="Nova Volume Service")
+openstack service create --name=volume \
+ --description="Cinder Volume Service" \
+ volume
if [[ -z "$DISABLE_ENDPOINTS" ]]; then
- keystone endpoint-create --region RegionOne --service-id $VOLUME_SERVICE \
+ openstack endpoint create --region RegionOne \
--publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:8776/v1/\$(tenant_id)s" \
--adminurl "http://$CONTROLLER_ADMIN_ADDRESS:8776/v1/\$(tenant_id)s" \
- --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8776/v1/\$(tenant_id)s"
+ --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8776/v1/\$(tenant_id)s" \
+ volume
fi
#
# Image service
#
-GLANCE_SERVICE=$(get_id \
-keystone service-create --name=glance \
- --type=image \
- --description="Glance Image Service")
+openstack service create --name=glance \
+ --description="Glance Image Service" \
+ image
if [[ -z "$DISABLE_ENDPOINTS" ]]; then
- keystone endpoint-create --region RegionOne --service-id $GLANCE_SERVICE \
+ openstack endpoint create --region RegionOne \
--publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:9292" \
--adminurl "http://$CONTROLLER_ADMIN_ADDRESS:9292" \
- --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:9292"
+ --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:9292" \
+ glance
fi
#
# EC2 service
#
-EC2_SERVICE=$(get_id \
-keystone service-create --name=ec2 \
- --type=ec2 \
- --description="EC2 Compatibility Layer")
+openstack service create --name=ec2 \
+ --description="EC2 Compatibility Layer" \
+ ec2
if [[ -z "$DISABLE_ENDPOINTS" ]]; then
- keystone endpoint-create --region RegionOne --service-id $EC2_SERVICE \
+ openstack endpoint create --region RegionOne \
--publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:8773/services/Cloud" \
--adminurl "http://$CONTROLLER_ADMIN_ADDRESS:8773/services/Admin" \
- --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8773/services/Cloud"
+ --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8773/services/Cloud" \
+ ec2
fi
#
# Swift service
#
-SWIFT_SERVICE=$(get_id \
-keystone service-create --name=swift \
- --type="object-store" \
- --description="Swift Service")
+openstack service create --name=swift \
+ --description="Swift Object Storage Service" \
+ object-store
if [[ -z "$DISABLE_ENDPOINTS" ]]; then
- keystone endpoint-create --region RegionOne --service-id $SWIFT_SERVICE \
+ openstack endpoint create --region RegionOne \
--publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:8080/v1/AUTH_\$(tenant_id)s" \
--adminurl "http://$CONTROLLER_ADMIN_ADDRESS:8080/v1" \
- --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8080/v1/AUTH_\$(tenant_id)s"
+ --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8080/v1/AUTH_\$(tenant_id)s" \
+ swift
fi
# create ec2 creds and parse the secret and access key returned
-RESULT=$(keystone ec2-credentials-create --tenant-id=$SERVICE_TENANT --user-id=$ADMIN_USER)
+ADMIN_USER=$(get_id openstack user show admin)
+RESULT=$(openstack ec2 credentials create --project service --user $ADMIN_USER)
ADMIN_ACCESS=`echo "$RESULT" | grep access | awk '{print $4}'`
ADMIN_SECRET=`echo "$RESULT" | grep secret | awk '{print $4}'`
diff --git a/keystone-moon/tox.ini b/keystone-moon/tox.ini
index 366a6829..cea70b7b 100644
--- a/keystone-moon/tox.ini
+++ b/keystone-moon/tox.ini
@@ -1,77 +1,64 @@
[tox]
minversion = 1.6
skipsdist = True
-envlist = py27,py33,py34,pep8,docs,sample_config
+envlist = py27,py34,pep8,docs,genconfig
[testenv]
usedevelop = True
install_command = pip install -U {opts} {packages}
setenv = VIRTUAL_ENV={envdir}
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
+deps = -r{toxinidir}/test-requirements.txt
+ .[ldap,memcache,mongodb]
commands = bash tools/pretty_tox.sh '{posargs}'
whitelist_externals = bash
-
-[testenv:py33]
-deps = -r{toxinidir}/requirements-py3.txt
- -r{toxinidir}/test-requirements-py3.txt
- nose
-commands =
- nosetests --with-coverage --cover-package=keystone \
- --exclude test_ldap \
- keystone/tests/test_auth_plugin.py \
- keystone/tests/test_backend.py \
- keystone/tests/test_backend_rules.py \
- keystone/tests/test_cache_backend_mongo.py \
- keystone/tests/test_driver_hints.py \
- keystone/tests/test_hacking_checks.py \
- keystone/tests/test_injection.py \
- keystone/tests/test_matchers.py \
- keystone/tests/test_policy.py \
- keystone/tests/test_singular_plural.py \
- keystone/tests/test_sizelimit.py \
- keystone/tests/test_sql_migrate_extensions.py \
- keystone/tests/test_token_bind.py \
- keystone/tests/test_url_middleware.py \
- keystone/tests/unit/common/test_utils.py \
- keystone/tests/test_validation.py \
- keystone/tests/test_v3_controller.py \
- keystone/tests/test_wsgi.py \
- keystone/tests/unit
+passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
[testenv:py34]
-deps = -r{toxinidir}/requirements-py3.txt
- -r{toxinidir}/test-requirements-py3.txt
+deps = -r{toxinidir}/test-requirements.txt
nose
+ .[memcache,mongodb]
commands =
nosetests --with-coverage --cover-package=keystone \
- --exclude test_ldap \
- keystone/tests/test_auth_plugin.py \
- keystone/tests/test_backend.py \
- keystone/tests/test_backend_rules.py \
- keystone/tests/test_cache_backend_mongo.py \
- keystone/tests/test_driver_hints.py \
- keystone/tests/test_hacking_checks.py \
- keystone/tests/test_injection.py \
- keystone/tests/test_matchers.py \
- keystone/tests/test_policy.py \
- keystone/tests/test_singular_plural.py \
- keystone/tests/test_sizelimit.py \
- keystone/tests/test_sql_migrate_extensions.py \
- keystone/tests/test_token_bind.py \
- keystone/tests/test_url_middleware.py \
+ keystone/tests/unit/auth/test_controllers.py \
+ keystone/tests/unit/catalog/test_core.py \
+ keystone/tests/unit/common/test_base64utils.py \
+ keystone/tests/unit/common/test_injection.py \
+ keystone/tests/unit/common/test_json_home.py \
+ keystone/tests/unit/common/test_sql_core.py \
keystone/tests/unit/common/test_utils.py \
- keystone/tests/test_validation.py \
- keystone/tests/test_v3_controller.py \
- keystone/tests/test_wsgi.py \
- keystone/tests/unit
+ keystone/tests/unit/test_auth_plugin.py \
+ keystone/tests/unit/test_backend.py \
+ keystone/tests/unit/test_backend_endpoint_policy.py \
+ keystone/tests/unit/test_backend_rules.py \
+ keystone/tests/unit/test_cache_backend_mongo.py \
+ keystone/tests/unit/test_config.py \
+ keystone/tests/unit/test_contrib_s3_core.py \
+ keystone/tests/unit/test_driver_hints.py \
+ keystone/tests/unit/test_exception.py \
+ keystone/tests/unit/test_policy.py \
+ keystone/tests/unit/test_singular_plural.py \
+ keystone/tests/unit/test_sql_livetest.py \
+ keystone/tests/unit/test_sql_migrate_extensions.py \
+ keystone/tests/unit/test_sql_upgrade.py \
+ keystone/tests/unit/test_ssl.py \
+ keystone/tests/unit/test_token_bind.py \
+ keystone/tests/unit/test_url_middleware.py \
+ keystone/tests/unit/test_v3_controller.py \
+ keystone/tests/unit/test_validation.py \
+ keystone/tests/unit/test_wsgi.py \
+ keystone/tests/unit/tests/test_core.py \
+ keystone/tests/unit/tests/test_utils.py \
+ keystone/tests/unit/token/test_pki_provider.py \
+ keystone/tests/unit/token/test_pkiz_provider.py \
+ keystone/tests/unit/token/test_token_model.py \
+ keystone/tests/unit/token/test_uuid_provider.py
[testenv:pep8]
commands =
flake8 {posargs}
# Run bash8 during pep8 runs to ensure violations are caught by
# the check and gate queues
- bashate run_tests.sh examples/pki/gen_pki.sh
+ bashate examples/pki/gen_pki.sh
# Check that .po and .pot files are valid.
bash -c "find keystone -type f -regex '.*\.pot?' -print0| \
xargs -0 -n 1 msgfmt --check-format -o /dev/null"
@@ -88,6 +75,12 @@ commands = {posargs}
[testenv:debug]
commands = oslo_debug_helper {posargs}
+[testenv:functional]
+basepython = python3.4
+deps = -r{toxinidir}/test-requirements.txt
+setenv = OS_TEST_PATH=./keystone/tests/functional
+commands = python setup.py testr --slowest --testr-args='{posargs}'
+
[flake8]
filename= *.py,keystone-all,keystone-manage
show-source = true
@@ -100,12 +93,19 @@ max-complexity=24
[testenv:docs]
commands=
+ bash -c "rm -rf doc/build"
+ bash -c "rm -rf doc/source/api"
python setup.py build_sphinx
-[testenv:sample_config]
+[testenv:genconfig]
commands = oslo-config-generator --config-file=config-generator/keystone.conf
+[testenv:bandit]
+deps = .[bandit]
+commands = bandit -c bandit.yaml -r keystone -n5 -p keystone_conservative
+
[hacking]
import_exceptions =
keystone.i18n
-local-check-factory = keystone.hacking.checks.factory
+ six.moves
+local-check-factory = keystone.tests.hacking.checks.factory
diff --git a/moonclient/moonclient/tests.py b/moonclient/moonclient/tests.py
new file mode 100644
index 00000000..5b02576c
--- /dev/null
+++ b/moonclient/moonclient/tests.py
@@ -0,0 +1 @@
+__author__ = 'vdsq3226'
diff --git a/moonclient/moonclient/tests/functional_tests.json b/moonclient/moonclient/tests/functional_tests.json
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/moonclient/moonclient/tests/functional_tests.json